diff --git a/README.md b/README.md
index 849e84eab..02d38860f 100644
--- a/README.md
+++ b/README.md
@@ -68,7 +68,6 @@ see the appropriate collector readme.
| [docker_engine](https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine) | Docker Engine |
| [dockerhub](https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub) | Docker Hub |
| [elasticsearch](https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch) | Elasticsearch/OpenSearch |
-| [energid](https://github.com/netdata/go.d.plugin/tree/master/modules/energid) | Energi Core |
| [envoy](https://github.com/netdata/go.d.plugin/tree/master/modules/envoy) | Envoy |
| [example](https://github.com/netdata/go.d.plugin/tree/master/modules/example) | - |
| [filecheck](https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck) | Files and Directories |
@@ -110,9 +109,7 @@ see the appropriate collector readme.
| [redis](https://github.com/netdata/go.d.plugin/tree/master/modules/redis) | Redis |
| [scaleio](https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio) | Dell EMC ScaleIO |
| [SNMP](https://github.com/netdata/go.d.plugin/blob/master/modules/snmp) | SNMP |
-| [solr](https://github.com/netdata/go.d.plugin/tree/master/modules/solr) | Solr |
| [squidlog](https://github.com/netdata/go.d.plugin/tree/master/modules/squidlog) | Squid |
-| [springboot2](https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2) | Spring Boot2 |
| [supervisord](https://github.com/netdata/go.d.plugin/tree/master/modules/supervisord) | Supervisor |
| [systemdunits](https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits) | Systemd unit state |
| [tengine](https://github.com/netdata/go.d.plugin/tree/master/modules/tengine) | Tengine |
diff --git a/agent/agent.go b/agent/agent.go
index 9d6a85f91..570807ea1 100644
--- a/agent/agent.go
+++ b/agent/agent.go
@@ -32,16 +32,17 @@ var isTerminal = isatty.IsTerminal(os.Stdout.Fd())
// Config is an Agent configuration.
type Config struct {
- Name string
- ConfDir []string
- ModulesConfDir []string
- ModulesSDConfPath []string
- VnodesConfDir []string
- StateFile string
- LockDir string
- ModuleRegistry module.Registry
- RunModule string
- MinUpdateEvery int
+ Name string
+ ConfDir []string
+ ModulesConfDir []string
+ ModulesConfSDDir []string
+ ModulesConfWatchPath []string
+ VnodesConfDir []string
+ StateFile string
+ LockDir string
+ ModuleRegistry module.Registry
+ RunModule string
+ MinUpdateEvery int
}
// Agent represents the orchestrator.
@@ -51,6 +52,7 @@ type Agent struct {
Name string
ConfDir multipath.MultiPath
ModulesConfDir multipath.MultiPath
+ ModulesConfSDDir multipath.MultiPath
ModulesSDConfPath []string
VnodesConfDir multipath.MultiPath
StateFile string
@@ -72,7 +74,8 @@ func New(cfg Config) *Agent {
Name: cfg.Name,
ConfDir: cfg.ConfDir,
ModulesConfDir: cfg.ModulesConfDir,
- ModulesSDConfPath: cfg.ModulesSDConfPath,
+ ModulesConfSDDir: cfg.ModulesConfSDDir,
+ ModulesSDConfPath: cfg.ModulesConfWatchPath,
VnodesConfDir: cfg.VnodesConfDir,
StateFile: cfg.StateFile,
LockDir: cfg.LockDir,
@@ -96,11 +99,9 @@ func serve(a *Agent) {
var wg sync.WaitGroup
var exit bool
- var reload bool
for {
ctx, cancel := context.WithCancel(context.Background())
- ctx = context.WithValue(ctx, "reload", reload)
wg.Add(1)
go func() { defer wg.Done(); a.run(ctx) }()
@@ -136,7 +137,6 @@ func serve(a *Agent) {
os.Exit(0)
}
- reload = true
time.Sleep(time.Second)
}
}
@@ -169,7 +169,7 @@ func (a *Agent) run(ctx context.Context) {
discCfg := a.buildDiscoveryConf(enabledModules)
- discoveryManager, err := discovery.NewManager(discCfg)
+ discMgr, err := discovery.NewManager(discCfg)
if err != nil {
a.Error(err)
if isTerminal {
@@ -178,46 +178,32 @@ func (a *Agent) run(ctx context.Context) {
return
}
- functionsManager := functions.NewManager()
-
- jobsManager := jobmgr.NewManager()
- jobsManager.PluginName = a.Name
- jobsManager.Out = a.Out
- jobsManager.Modules = enabledModules
-
- // TODO: API will be changed in https://github.com/netdata/netdata/pull/16702
- //if logger.Level.Enabled(slog.LevelDebug) {
- // dyncfgDiscovery, _ := dyncfg.NewDiscovery(dyncfg.Config{
- // Plugin: a.Name,
- // API: netdataapi.New(a.Out),
- // Modules: enabledModules,
- // ModuleConfigDefaults: discCfg.Registry,
- // Functions: functionsManager,
- // })
- //
- // discoveryManager.Add(dyncfgDiscovery)
- //
- // jobsManager.Dyncfg = dyncfgDiscovery
- //}
+ fnMgr := functions.NewManager()
+
+ jobMgr := jobmgr.New()
+ jobMgr.PluginName = a.Name
+ jobMgr.Out = a.Out
+ jobMgr.Modules = enabledModules
+ jobMgr.FnReg = fnMgr
if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 {
vnodes.Disabled = true
} else {
- jobsManager.Vnodes = reg
+ jobMgr.Vnodes = reg
}
if a.LockDir != "" {
- jobsManager.FileLock = filelock.New(a.LockDir)
+ jobMgr.FileLock = filelock.New(a.LockDir)
}
- var statusSaveManager *filestatus.Manager
+ var fsMgr *filestatus.Manager
if !isTerminal && a.StateFile != "" {
- statusSaveManager = filestatus.NewManager(a.StateFile)
- jobsManager.StatusSaver = statusSaveManager
+ fsMgr = filestatus.NewManager(a.StateFile)
+ jobMgr.FileStatus = fsMgr
if store, err := filestatus.LoadStore(a.StateFile); err != nil {
a.Warningf("couldn't load state file: %v", err)
} else {
- jobsManager.StatusStore = store
+ jobMgr.FileStatusStore = store
}
}
@@ -225,17 +211,17 @@ func (a *Agent) run(ctx context.Context) {
var wg sync.WaitGroup
wg.Add(1)
- go func() { defer wg.Done(); functionsManager.Run(ctx) }()
+ go func() { defer wg.Done(); fnMgr.Run(ctx) }()
wg.Add(1)
- go func() { defer wg.Done(); jobsManager.Run(ctx, in) }()
+ go func() { defer wg.Done(); jobMgr.Run(ctx, in) }()
wg.Add(1)
- go func() { defer wg.Done(); discoveryManager.Run(ctx, in) }()
+ go func() { defer wg.Done(); discMgr.Run(ctx, in) }()
- if statusSaveManager != nil {
+ if fsMgr != nil {
wg.Add(1)
- go func() { defer wg.Done(); statusSaveManager.Run(ctx) }()
+ go func() { defer wg.Done(); fsMgr.Run(ctx) }()
}
wg.Wait()
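
For reference, a minimal runnable sketch of the supervision pattern serve() implements after this change: each pass starts run() under a cancellable context, and a restart is a plain cancel-and-loop with no "reload" flag threaded through the context. The names supervise and restarts are illustrative, not part of the agent.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// supervise mirrors the shape of serve(): start run under a cancellable
// context, cancel and wait to stop it, pause briefly, then go again.
func supervise(run func(context.Context), restarts int) {
	for i := 0; ; i++ {
		ctx, cancel := context.WithCancel(context.Background())

		var wg sync.WaitGroup
		wg.Add(1)
		go func() { defer wg.Done(); run(ctx) }()

		time.Sleep(100 * time.Millisecond) // stand-in for waiting on a signal
		cancel()
		wg.Wait()

		if i >= restarts {
			return
		}
		time.Sleep(time.Second) // same pacing serve() uses between runs
	}
}

func main() {
	supervise(func(ctx context.Context) {
		fmt.Println("run started")
		<-ctx.Done()
		fmt.Println("run stopped")
	}, 1)
}
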
diff --git a/agent/agent_test.go b/agent/agent_test.go
index 2a15a6b73..242584f4b 100644
--- a/agent/agent_test.go
+++ b/agent/agent_test.go
@@ -21,14 +21,14 @@ func TestNew(t *testing.T) {
func TestAgent_Run(t *testing.T) {
a := New(Config{
- Name: "",
- ConfDir: nil,
- ModulesConfDir: nil,
- ModulesSDConfPath: nil,
- StateFile: "",
- ModuleRegistry: nil,
- RunModule: "",
- MinUpdateEvery: 0,
+ Name: "",
+ ConfDir: nil,
+ ModulesConfDir: nil,
+ ModulesConfWatchPath: nil,
+ StateFile: "",
+ ModuleRegistry: nil,
+ RunModule: "",
+ MinUpdateEvery: 0,
})
var buf bytes.Buffer
@@ -74,17 +74,17 @@ func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) mod
func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module {
return &module.MockModule{
- InitFunc: func() bool {
+ InitFunc: func() error {
mux.Lock()
defer mux.Unlock()
stats[name+"_init"]++
- return true
+ return nil
},
- CheckFunc: func() bool {
+ CheckFunc: func() error {
mux.Lock()
defer mux.Unlock()
stats[name+"_check"]++
- return true
+ return nil
},
ChartsFunc: func() *module.Charts {
mux.Lock()
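
The Init/Check contract moves from bool to error across these tests, which lets a failing probe carry a reason. A hedged sketch of a mock exercising the new signatures, assuming the standard errors package is imported (the error text is made up for illustration):

m := &module.MockModule{
	InitFunc:  func() error { return nil },                                // init succeeded
	CheckFunc: func() error { return errors.New("no data source found") }, // check fails with a cause
}
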
diff --git a/agent/confgroup/cache.go b/agent/confgroup/cache.go
deleted file mode 100644
index 40c8071d5..000000000
--- a/agent/confgroup/cache.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package confgroup
-
-func NewCache() *Cache {
- return &Cache{
- hashes: make(map[uint64]uint),
- sources: make(map[string]map[uint64]Config),
- }
-}
-
-type Cache struct {
- hashes map[uint64]uint // map[cfgHash]cfgCount
- sources map[string]map[uint64]Config // map[cfgSource]map[cfgHash]cfg
-}
-
-func (c *Cache) Add(group *Group) (added, removed []Config) {
- if group == nil {
- return nil, nil
- }
-
- if len(group.Configs) == 0 {
- return c.addEmpty(group)
- }
-
- return c.addNotEmpty(group)
-}
-
-func (c *Cache) addEmpty(group *Group) (added, removed []Config) {
- set, ok := c.sources[group.Source]
- if !ok {
- return nil, nil
- }
-
- for hash, cfg := range set {
- c.hashes[hash]--
- if c.hashes[hash] == 0 {
- removed = append(removed, cfg)
- }
- delete(set, hash)
- }
-
- delete(c.sources, group.Source)
-
- return nil, removed
-}
-
-func (c *Cache) addNotEmpty(group *Group) (added, removed []Config) {
- set, ok := c.sources[group.Source]
- if !ok {
- set = make(map[uint64]Config)
- c.sources[group.Source] = set
- }
-
- seen := make(map[uint64]struct{})
-
- for _, cfg := range group.Configs {
- hash := cfg.Hash()
- seen[hash] = struct{}{}
-
- if _, ok := set[hash]; ok {
- continue
- }
-
- set[hash] = cfg
- if c.hashes[hash] == 0 {
- added = append(added, cfg)
- }
- c.hashes[hash]++
- }
-
- if !ok {
- return added, nil
- }
-
- for hash, cfg := range set {
- if _, ok := seen[hash]; ok {
- continue
- }
-
- delete(set, hash)
- c.hashes[hash]--
- if c.hashes[hash] == 0 {
- removed = append(removed, cfg)
- }
- }
-
- if ok && len(set) == 0 {
- delete(c.sources, group.Source)
- }
-
- return added, removed
-}
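
For reviewers, the contract of the removed Cache, reconstructed from the deleted code above: it reference-counts config hashes across sources, so a config is reported as added only on its first appearance anywhere and as removed only when the last source referencing it retracts. A usage sketch against the pre-change package:

package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/agent/confgroup"
)

func main() {
	cache := confgroup.NewCache()
	cfg := confgroup.Config{"name": "a", "module": "m"}

	added, removed := cache.Add(&confgroup.Group{Source: "s1", Configs: []confgroup.Config{cfg}})
	fmt.Println(len(added), len(removed)) // 1 0 — first appearance anywhere

	added, removed = cache.Add(&confgroup.Group{Source: "s2", Configs: []confgroup.Config{cfg}})
	fmt.Println(len(added), len(removed)) // 0 0 — same hash, refcount bumped to 2

	added, removed = cache.Add(&confgroup.Group{Source: "s1"}) // empty group: s1 retracts everything
	fmt.Println(len(added), len(removed)) // 0 0 — s2 still holds the last reference
}
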
diff --git a/agent/confgroup/cache_test.go b/agent/confgroup/cache_test.go
deleted file mode 100644
index a2bbd4919..000000000
--- a/agent/confgroup/cache_test.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package confgroup
-
-import (
- "sort"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-func TestConfigCache_Add(t *testing.T) {
- tests := map[string]struct {
- prepareGroups []Group
- groups []Group
- expectedAdd []Config
- expectedRemove []Config
- }{
- "new group, new configs": {
- groups: []Group{
- prepareGroup("source", prepareCfg("name", "module")),
- },
- expectedAdd: []Config{
- prepareCfg("name", "module"),
- },
- },
- "several equal updates for the same group": {
- groups: []Group{
- prepareGroup("source", prepareCfg("name", "module")),
- prepareGroup("source", prepareCfg("name", "module")),
- prepareGroup("source", prepareCfg("name", "module")),
- prepareGroup("source", prepareCfg("name", "module")),
- prepareGroup("source", prepareCfg("name", "module")),
- },
- expectedAdd: []Config{
- prepareCfg("name", "module"),
- },
- },
- "empty group update for cached group": {
- prepareGroups: []Group{
- prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- },
- groups: []Group{
- prepareGroup("source"),
- },
- expectedRemove: []Config{
- prepareCfg("name1", "module"),
- prepareCfg("name2", "module"),
- },
- },
- "changed group update for cached group": {
- prepareGroups: []Group{
- prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- },
- groups: []Group{
- prepareGroup("source", prepareCfg("name2", "module")),
- },
- expectedRemove: []Config{
- prepareCfg("name1", "module"),
- },
- },
- "empty group update for uncached group": {
- groups: []Group{
- prepareGroup("source"),
- prepareGroup("source"),
- },
- },
- "several updates with different source but same context": {
- groups: []Group{
- prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- },
- expectedAdd: []Config{
- prepareCfg("name1", "module"),
- prepareCfg("name2", "module"),
- },
- },
- "have equal configs from 2 sources, get empty group for the 1st source": {
- prepareGroups: []Group{
- prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")),
- },
- groups: []Group{
- prepareGroup("source2"),
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- cache := NewCache()
-
- for _, group := range test.prepareGroups {
- cache.Add(&group)
- }
-
- var added, removed []Config
- for _, group := range test.groups {
- a, r := cache.Add(&group)
- added = append(added, a...)
- removed = append(removed, r...)
- }
-
- sortConfigs(added)
- sortConfigs(removed)
- sortConfigs(test.expectedAdd)
- sortConfigs(test.expectedRemove)
-
- assert.Equalf(t, test.expectedAdd, added, "added configs")
- assert.Equalf(t, test.expectedRemove, removed, "removed configs")
- })
- }
-}
-
-func prepareGroup(source string, cfgs ...Config) Group {
- return Group{
- Configs: cfgs,
- Source: source,
- }
-}
-
-func prepareCfg(name, module string) Config {
- return Config{
- "name": name,
- "module": module,
- }
-}
-
-func sortConfigs(cfgs []Config) {
- if len(cfgs) == 0 {
- return
- }
- sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() })
-}
diff --git a/agent/confgroup/config.go b/agent/confgroup/config.go
new file mode 100644
index 000000000..a9208e487
--- /dev/null
+++ b/agent/confgroup/config.go
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package confgroup
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent/hostinfo"
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/ilyam8/hashstructure"
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ keyName = "name"
+ keyModule = "module"
+ keyUpdateEvery = "update_every"
+ keyDetectRetry = "autodetection_retry"
+ keyPriority = "priority"
+ keyLabels = "labels"
+ keyVnode = "vnode"
+
+ ikeySource = "__source__"
+ ikeySourceType = "__source_type__"
+ ikeyProvider = "__provider__"
+)
+
+const (
+ TypeStock = "stock"
+ TypeUser = "user"
+ TypeDiscovered = "discovered"
+ TypeDyncfg = "dyncfg"
+)
+
+type Config map[string]any
+
+func (c Config) HashIncludeMap(_ string, k, _ any) (bool, error) {
+ s := k.(string)
+ return !(strings.HasPrefix(s, "__") || strings.HasSuffix(s, "__")), nil
+}
+
+func (c Config) Set(key string, value any) Config { c[key] = value; return c }
+func (c Config) Get(key string) any { return c[key] }
+
+func (c Config) Name() string { v, _ := c.Get(keyName).(string); return v }
+func (c Config) Module() string { v, _ := c.Get(keyModule).(string); return v }
+func (c Config) FullName() string { return fullName(c.Name(), c.Module()) }
+func (c Config) UpdateEvery() int { v, _ := c.Get(keyUpdateEvery).(int); return v }
+func (c Config) AutoDetectionRetry() int { v, _ := c.Get(keyDetectRetry).(int); return v }
+func (c Config) Priority() int { v, _ := c.Get(keyPriority).(int); return v }
+func (c Config) Labels() map[any]any { v, _ := c.Get(keyLabels).(map[any]any); return v }
+func (c Config) Hash() uint64 { return calcHash(c) }
+func (c Config) Vnode() string { v, _ := c.Get(keyVnode).(string); return v }
+
+func (c Config) SetName(v string) Config { return c.Set(keyName, v) }
+func (c Config) SetModule(v string) Config { return c.Set(keyModule, v) }
+
+func (c Config) UID() string {
+ return fmt.Sprintf("%s_%s_%s_%s_%d", c.SourceType(), c.Provider(), c.Source(), c.FullName(), c.Hash())
+}
+
+func (c Config) Source() string { v, _ := c.Get(ikeySource).(string); return v }
+func (c Config) SourceType() string { v, _ := c.Get(ikeySourceType).(string); return v }
+func (c Config) Provider() string { v, _ := c.Get(ikeyProvider).(string); return v }
+func (c Config) SetSource(v string) Config { return c.Set(ikeySource, v) }
+func (c Config) SetSourceType(v string) Config { return c.Set(ikeySourceType, v) }
+func (c Config) SetProvider(v string) Config { return c.Set(ikeyProvider, v) }
+
+func (c Config) SourceTypePriority() int {
+ switch c.SourceType() {
+ default:
+ return 0
+ case TypeStock:
+ return 2
+ case TypeDiscovered:
+ return 4
+ case TypeUser:
+ return 8
+ case TypeDyncfg:
+ return 16
+ }
+}
+
+func (c Config) Clone() (Config, error) {
+ type plain Config
+ bytes, err := yaml.Marshal((plain)(c))
+ if err != nil {
+ return nil, err
+ }
+ var newConfig Config
+ if err := yaml.Unmarshal(bytes, &newConfig); err != nil {
+ return nil, err
+ }
+ return newConfig, nil
+}
+
+func (c Config) ApplyDefaults(def Default) {
+ if c.UpdateEvery() <= 0 {
+ v := firstPositive(def.UpdateEvery, module.UpdateEvery)
+ c.Set("update_every", v)
+ }
+ if c.AutoDetectionRetry() <= 0 {
+ v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry)
+ c.Set("autodetection_retry", v)
+ }
+ if c.Priority() <= 0 {
+ v := firstPositive(def.Priority, module.Priority)
+ c.Set("priority", v)
+ }
+ if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 {
+ c.Set("update_every", def.MinUpdateEvery)
+ }
+ if c.Name() == "" {
+ c.Set("name", c.Module())
+ } else {
+ c.Set("name", cleanName(jobNameResolveHostname(c.Name())))
+ }
+
+ if v, ok := c.Get("url").(string); ok {
+ c.Set("url", urlResolveHostname(v))
+ }
+}
+
+var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`)
+
+func cleanName(name string) string {
+ return reInvalidCharacters.ReplaceAllString(name, "_")
+}
+
+func fullName(name, module string) string {
+ if name == module {
+ return name
+ }
+ return module + "_" + name
+}
+
+func calcHash(obj any) uint64 {
+ hash, _ := hashstructure.Hash(obj, nil)
+ return hash
+}
+
+func firstPositive(value int, others ...int) int {
+ if value > 0 || len(others) == 0 {
+ return value
+ }
+ return firstPositive(others[0], others[1:]...)
+}
+
+func urlResolveHostname(rawURL string) string {
+ if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") {
+ return rawURL
+ }
+
+ u, err := url.Parse(rawURL)
+ if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) {
+ return rawURL
+ }
+
+ u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1)
+
+ return u.String()
+}
+
+func jobNameResolveHostname(name string) string {
+ if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") {
+ return name
+ }
+
+ if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") {
+ return name
+ }
+
+ return strings.Replace(name, "hostname", hostinfo.Hostname, 1)
+}
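
A quick sketch of how the new accessors compose (import path as in this repo; the values in the comments follow from the code above, and the __-prefixed internal keys are excluded from Hash() by HashIncludeMap):

package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/agent/confgroup"
)

func main() {
	cfg := confgroup.Config{}
	cfg.SetModule("nginx").SetName("local") // setters return Config, so they chain
	cfg.SetProvider("file reader")
	cfg.SetSourceType(confgroup.TypeUser)
	cfg.SetSource("discoverer=file_reader,file=/etc/netdata/go.d/nginx.conf")

	cfg.ApplyDefaults(confgroup.Default{}) // fills update_every etc. from module-level defaults

	fmt.Println(cfg.FullName())           // nginx_local — module + "_" + name when they differ
	fmt.Println(cfg.SourceTypePriority()) // 8 — stock(2) < discovered(4) < user(8) < dyncfg(16)
	fmt.Println(cfg.UID())                // user_file reader_<source>_nginx_local_<hash>
}
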
diff --git a/agent/confgroup/group_test.go b/agent/confgroup/config_test.go
similarity index 99%
rename from agent/confgroup/group_test.go
rename to agent/confgroup/config_test.go
index af9a804e8..beac8e61b 100644
--- a/agent/confgroup/group_test.go
+++ b/agent/confgroup/config_test.go
@@ -316,7 +316,7 @@ func TestConfig_Apply(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
- test.origCfg.Apply(test.def)
+ test.origCfg.ApplyDefaults(test.def)
assert.Equal(t, test.expectedCfg, test.origCfg)
})
diff --git a/agent/confgroup/group.go b/agent/confgroup/group.go
index 649a145d7..b8e7bd775 100644
--- a/agent/confgroup/group.go
+++ b/agent/confgroup/group.go
@@ -2,126 +2,7 @@
package confgroup
-import (
- "fmt"
- "net/url"
- "regexp"
- "strings"
-
- "github.com/netdata/go.d.plugin/agent/hostinfo"
- "github.com/netdata/go.d.plugin/agent/module"
-
- "github.com/ilyam8/hashstructure"
-)
-
type Group struct {
Configs []Config
Source string
}
-
-type Config map[string]interface{}
-
-func (c Config) HashIncludeMap(_ string, k, _ interface{}) (bool, error) {
- s := k.(string)
- return !(strings.HasPrefix(s, "__") && strings.HasSuffix(s, "__")), nil
-}
-
-func (c Config) NameWithHash() string { return fmt.Sprintf("%s_%d", c.Name(), c.Hash()) }
-func (c Config) Name() string { v, _ := c.get("name").(string); return v }
-func (c Config) Module() string { v, _ := c.get("module").(string); return v }
-func (c Config) FullName() string { return fullName(c.Name(), c.Module()) }
-func (c Config) UpdateEvery() int { v, _ := c.get("update_every").(int); return v }
-func (c Config) AutoDetectionRetry() int { v, _ := c.get("autodetection_retry").(int); return v }
-func (c Config) Priority() int { v, _ := c.get("priority").(int); return v }
-func (c Config) Labels() map[any]any { v, _ := c.get("labels").(map[any]any); return v }
-func (c Config) Hash() uint64 { return calcHash(c) }
-func (c Config) Source() string { v, _ := c.get("__source__").(string); return v }
-func (c Config) Provider() string { v, _ := c.get("__provider__").(string); return v }
-func (c Config) Vnode() string { v, _ := c.get("vnode").(string); return v }
-
-func (c Config) SetName(v string) { c.set("name", v) }
-func (c Config) SetModule(v string) { c.set("module", v) }
-func (c Config) SetSource(v string) { c.set("__source__", v) }
-func (c Config) SetProvider(v string) { c.set("__provider__", v) }
-
-func (c Config) set(key string, value interface{}) { c[key] = value }
-func (c Config) get(key string) interface{} { return c[key] }
-
-func (c Config) Apply(def Default) {
- if c.UpdateEvery() <= 0 {
- v := firstPositive(def.UpdateEvery, module.UpdateEvery)
- c.set("update_every", v)
- }
- if c.AutoDetectionRetry() <= 0 {
- v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry)
- c.set("autodetection_retry", v)
- }
- if c.Priority() <= 0 {
- v := firstPositive(def.Priority, module.Priority)
- c.set("priority", v)
- }
- if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 {
- c.set("update_every", def.MinUpdateEvery)
- }
- if c.Name() == "" {
- c.set("name", c.Module())
- } else {
- c.set("name", cleanName(jobNameResolveHostname(c.Name())))
- }
-
- if v, ok := c.get("url").(string); ok {
- c.set("url", urlResolveHostname(v))
- }
-}
-
-func cleanName(name string) string {
- return reInvalidCharacters.ReplaceAllString(name, "_")
-}
-
-var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`)
-
-func fullName(name, module string) string {
- if name == module {
- return name
- }
- return module + "_" + name
-}
-
-func calcHash(obj interface{}) uint64 {
- hash, _ := hashstructure.Hash(obj, nil)
- return hash
-}
-
-func firstPositive(value int, others ...int) int {
- if value > 0 || len(others) == 0 {
- return value
- }
- return firstPositive(others[0], others[1:]...)
-}
-
-func urlResolveHostname(rawURL string) string {
- if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") {
- return rawURL
- }
-
- u, err := url.Parse(rawURL)
- if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) {
- return rawURL
- }
-
- u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1)
-
- return u.String()
-}
-
-func jobNameResolveHostname(name string) string {
- if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") {
- return name
- }
-
- if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") {
- return name
- }
-
- return strings.Replace(name, "hostname", hostinfo.Hostname, 1)
-}
diff --git a/agent/discovery/config.go b/agent/discovery/config.go
index d19770d35..ea1a70854 100644
--- a/agent/discovery/config.go
+++ b/agent/discovery/config.go
@@ -8,12 +8,14 @@ import (
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/discovery/dummy"
"github.com/netdata/go.d.plugin/agent/discovery/file"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd"
)
type Config struct {
Registry confgroup.Registry
File file.Config
Dummy dummy.Config
+ SD sd.Config
}
func validateConfig(cfg Config) error {
diff --git a/agent/discovery/dummy/discovery.go b/agent/discovery/dummy/discovery.go
index acd0b8f1c..c770aca72 100644
--- a/agent/discovery/dummy/discovery.go
+++ b/agent/discovery/dummy/discovery.go
@@ -17,7 +17,8 @@ func NewDiscovery(cfg Config) (*Discovery, error) {
}
d := &Discovery{
Logger: logger.New().With(
- slog.String("component", "discovery dummy"),
+ slog.String("component", "discovery"),
+ slog.String("discoverer", "dummy"),
),
reg: cfg.Registry,
names: cfg.Names,
@@ -65,15 +66,18 @@ func (d *Discovery) newCfgGroup(name string) *confgroup.Group {
return nil
}
+ src := "internal"
cfg := confgroup.Config{}
cfg.SetModule(name)
- cfg.SetSource(name)
+ cfg.SetSource(src)
+ cfg.SetSourceType(confgroup.TypeStock)
cfg.SetProvider("dummy")
- cfg.Apply(def)
+ cfg.ApplyDefaults(def)
group := &confgroup.Group{
Configs: []confgroup.Config{cfg},
- Source: name,
+ Source: src,
}
+
return group
}
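
After this change every dummy-discovered job shares the single source "internal" instead of being keyed by module name. Spelled out as a literal, and assuming the registry entry supplies no overrides (so ApplyDefaults falls back to the module-level globals), the group newCfgGroup builds for a module "nginx" is:

group := &confgroup.Group{
	Source: "internal",
	Configs: []confgroup.Config{{
		"module":              "nginx",
		"name":                "nginx", // ApplyDefaults copies module into an empty name
		"update_every":        module.UpdateEvery,
		"autodetection_retry": module.AutoDetectionRetry,
		"priority":            module.Priority,
		"__source__":          "internal",
		"__source_type__":     confgroup.TypeStock,
		"__provider__":        "dummy",
	}},
}
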
diff --git a/agent/discovery/dyncfg/config.go b/agent/discovery/dyncfg/config.go
deleted file mode 100644
index ebda00f50..000000000
--- a/agent/discovery/dyncfg/config.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package dyncfg
-
-import (
- "github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/functions"
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-type Config struct {
- Plugin string
- API NetdataDyncfgAPI
- Functions FunctionRegistry
- Modules module.Registry
- ModuleConfigDefaults confgroup.Registry
-}
-
-type NetdataDyncfgAPI interface {
- DynCfgEnable(string) error
- DynCfgReset() error
- DyncCfgRegisterModule(string) error
- DynCfgRegisterJob(_, _, _ string) error
- DynCfgReportJobStatus(_, _, _, _ string) error
- FunctionResultSuccess(_, _, _ string) error
- FunctionResultReject(_, _, _ string) error
-}
-
-type FunctionRegistry interface {
- Register(name string, reg func(functions.Function))
-}
-
-func validateConfig(cfg Config) error {
- return nil
-}
diff --git a/agent/discovery/dyncfg/dyncfg.go b/agent/discovery/dyncfg/dyncfg.go
deleted file mode 100644
index 2f3c34234..000000000
--- a/agent/discovery/dyncfg/dyncfg.go
+++ /dev/null
@@ -1,256 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package dyncfg
-
-import (
- "bytes"
- "context"
- "fmt"
- "log/slog"
- "strings"
- "sync"
-
- "github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/functions"
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/netdata/go.d.plugin/logger"
-
- "gopkg.in/yaml.v2"
-)
-
-const dynCfg = "dyncfg"
-
-func NewDiscovery(cfg Config) (*Discovery, error) {
- if err := validateConfig(cfg); err != nil {
- return nil, err
- }
-
- mgr := &Discovery{
- Logger: logger.New().With(
- slog.String("component", "discovery dyncfg"),
- ),
- Plugin: cfg.Plugin,
- API: cfg.API,
- Modules: cfg.Modules,
- ModuleConfigDefaults: nil,
- mux: &sync.Mutex{},
- configs: make(map[string]confgroup.Config),
- }
-
- mgr.registerFunctions(cfg.Functions)
-
- return mgr, nil
-}
-
-type Discovery struct {
- *logger.Logger
-
- Plugin string
- API NetdataDyncfgAPI
- Modules module.Registry
- ModuleConfigDefaults confgroup.Registry
-
- in chan<- []*confgroup.Group
-
- mux *sync.Mutex
- configs map[string]confgroup.Config
-}
-
-func (d *Discovery) String() string {
- return d.Name()
-}
-
-func (d *Discovery) Name() string {
- return "dyncfg discovery"
-}
-
-func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
- d.Info("instance is started")
- defer func() { d.Info("instance is stopped") }()
-
- d.in = in
-
- if reload, ok := ctx.Value("reload").(bool); ok && reload {
- _ = d.API.DynCfgReset()
- }
-
- _ = d.API.DynCfgEnable(d.Plugin)
-
- for k := range d.Modules {
- _ = d.API.DyncCfgRegisterModule(k)
- }
-
- <-ctx.Done()
-}
-
-func (d *Discovery) registerFunctions(r FunctionRegistry) {
- r.Register("get_plugin_config", d.getPluginConfig)
- r.Register("get_plugin_config_schema", d.getModuleConfigSchema)
- r.Register("set_plugin_config", d.setPluginConfig)
-
- r.Register("get_module_config", d.getModuleConfig)
- r.Register("get_module_config_schema", d.getModuleConfigSchema)
- r.Register("set_module_config", d.setModuleConfig)
-
- r.Register("get_job_config", d.getJobConfig)
- r.Register("get_job_config_schema", d.getJobConfigSchema)
- r.Register("set_job_config", d.setJobConfig)
- r.Register("delete_job", d.deleteJobName)
-}
-
-func (d *Discovery) getPluginConfig(fn functions.Function) { d.notImplemented(fn) }
-func (d *Discovery) getPluginConfigSchema(fn functions.Function) { d.notImplemented(fn) }
-func (d *Discovery) setPluginConfig(fn functions.Function) { d.notImplemented(fn) }
-
-func (d *Discovery) getModuleConfig(fn functions.Function) { d.notImplemented(fn) }
-func (d *Discovery) getModuleConfigSchema(fn functions.Function) { d.notImplemented(fn) }
-func (d *Discovery) setModuleConfig(fn functions.Function) { d.notImplemented(fn) }
-
-func (d *Discovery) getJobConfig(fn functions.Function) {
- if err := d.verifyFn(fn, 2); err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- moduleName, jobName := fn.Args[0], fn.Args[1]
-
- bs, err := d.getConfigBytes(moduleName + "_" + jobName)
- if err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- d.apiSuccessYAML(fn, string(bs))
-}
-
-func (d *Discovery) getJobConfigSchema(fn functions.Function) {
- if err := d.verifyFn(fn, 1); err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- name := fn.Args[0]
-
- v, ok := d.Modules[name]
- if !ok {
- msg := jsonErrorf("module %s is not registered", name)
- d.apiReject(fn, msg)
- return
- }
-
- d.apiSuccessJSON(fn, v.JobConfigSchema)
-}
-
-func (d *Discovery) setJobConfig(fn functions.Function) {
- if err := d.verifyFn(fn, 2); err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- var cfg confgroup.Config
- if err := yaml.NewDecoder(bytes.NewBuffer(fn.Payload)).Decode(&cfg); err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- modName, jobName := fn.Args[0], fn.Args[1]
- def, _ := d.ModuleConfigDefaults.Lookup(modName)
- src := source(modName, jobName)
-
- cfg.SetProvider(dynCfg)
- cfg.SetSource(src)
- cfg.SetModule(modName)
- cfg.SetName(jobName)
- cfg.Apply(def)
-
- d.in <- []*confgroup.Group{
- {
- Configs: []confgroup.Config{cfg},
- Source: src,
- },
- }
-
- d.apiSuccessJSON(fn, "")
-}
-
-func (d *Discovery) deleteJobName(fn functions.Function) {
- if err := d.verifyFn(fn, 2); err != nil {
- d.apiReject(fn, err.Error())
- return
- }
-
- modName, jobName := fn.Args[0], fn.Args[1]
-
- cfg, ok := d.getConfig(modName + "_" + jobName)
- if !ok {
- d.apiReject(fn, jsonErrorf("module '%s' job '%s': not registered", modName, jobName))
- return
- }
- if cfg.Provider() != dynCfg {
- d.apiReject(fn, jsonErrorf("module '%s' job '%s': can't remove non Dyncfg job", modName, jobName))
- return
- }
-
- d.in <- []*confgroup.Group{
- {
- Configs: []confgroup.Config{},
- Source: source(modName, jobName),
- },
- }
-
- d.apiSuccessJSON(fn, "")
-}
-
-func (d *Discovery) apiSuccessJSON(fn functions.Function, payload string) {
- _ = d.API.FunctionResultSuccess(fn.UID, "application/json", payload)
-}
-
-func (d *Discovery) apiSuccessYAML(fn functions.Function, payload string) {
- _ = d.API.FunctionResultSuccess(fn.UID, "application/x-yaml", payload)
-}
-
-func (d *Discovery) apiReject(fn functions.Function, msg string) {
- _ = d.API.FunctionResultReject(fn.UID, "application/json", msg)
-}
-
-func (d *Discovery) notImplemented(fn functions.Function) {
- d.Infof("not implemented: '%s'", fn.String())
- msg := jsonErrorf("function '%s' is not implemented", fn.Name)
- d.apiReject(fn, msg)
-}
-
-func (d *Discovery) verifyFn(fn functions.Function, wantArgs int) error {
- if got := len(fn.Args); got != wantArgs {
- msg := jsonErrorf("wrong number of arguments: want %d, got %d (args: '%v')", wantArgs, got, fn.Args)
- return fmt.Errorf(msg)
- }
-
- if isSetFunction(fn) && len(fn.Payload) == 0 {
- msg := jsonErrorf("no payload")
- return fmt.Errorf(msg)
- }
-
- return nil
-}
-
-func jsonErrorf(format string, a ...any) string {
- msg := fmt.Sprintf(format, a...)
- msg = strings.ReplaceAll(msg, "\n", " ")
-
- return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg)
-}
-
-func source(modName, jobName string) string {
- return fmt.Sprintf("%s/%s/%s", dynCfg, modName, jobName)
-}
-
-func cfgJobName(cfg confgroup.Config) string {
- if strings.HasPrefix(cfg.Source(), "dyncfg") {
- return cfg.Name()
- }
- return cfg.NameWithHash()
-}
-
-func isSetFunction(fn functions.Function) bool {
- return strings.HasPrefix(fn.Name, "set_")
-}
diff --git a/agent/discovery/dyncfg/dyncfg_test.go b/agent/discovery/dyncfg/dyncfg_test.go
deleted file mode 100644
index 3eee1cef3..000000000
--- a/agent/discovery/dyncfg/dyncfg_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package dyncfg
-
-import (
- "context"
- "sync"
- "testing"
- "time"
-
- "github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/functions"
- "github.com/netdata/go.d.plugin/agent/module"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func TestNewDiscovery(t *testing.T) {
-
-}
-
-func TestDiscovery_Register(t *testing.T) {
- tests := map[string]struct {
- regConfigs []confgroup.Config
- wantApiStats *mockApi
- wantConfigs int
- }{
- "register jobs created by Dyncfg and other providers": {
- regConfigs: []confgroup.Config{
- prepareConfig(
- "__provider__", dynCfg,
- "module", "test",
- "name", "first",
- ),
- prepareConfig(
- "__provider__", "test",
- "module", "test",
- "name", "second",
- ),
- },
- wantConfigs: 2,
- wantApiStats: &mockApi{
- callsDynCfgRegisterJob: 1,
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- var mock mockApi
- d := &Discovery{
- API: &mock,
- mux: &sync.Mutex{},
- configs: make(map[string]confgroup.Config),
- }
-
- for _, v := range test.regConfigs {
- d.Register(v)
- }
-
- assert.Equal(t, test.wantApiStats, &mock)
- assert.Equal(t, test.wantConfigs, len(d.configs))
- })
- }
-}
-
-func TestDiscovery_Unregister(t *testing.T) {
- tests := map[string]struct {
- regConfigs []confgroup.Config
- unregConfigs []confgroup.Config
- wantApiStats *mockApi
- wantConfigs int
- }{
- "register/unregister jobs created by Dyncfg and other providers": {
- wantConfigs: 0,
- wantApiStats: &mockApi{
- callsDynCfgRegisterJob: 1,
- },
- regConfigs: []confgroup.Config{
- prepareConfig(
- "__provider__", dynCfg,
- "module", "test",
- "name", "first",
- ),
- prepareConfig(
- "__provider__", "test",
- "module", "test",
- "name", "second",
- ),
- },
- unregConfigs: []confgroup.Config{
- prepareConfig(
- "__provider__", dynCfg,
- "module", "test",
- "name", "first",
- ),
- prepareConfig(
- "__provider__", "test",
- "module", "test",
- "name", "second",
- ),
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- var mock mockApi
- d := &Discovery{
- API: &mock,
- mux: &sync.Mutex{},
- configs: make(map[string]confgroup.Config),
- }
-
- for _, v := range test.regConfigs {
- d.Register(v)
- }
- for _, v := range test.unregConfigs {
- d.Unregister(v)
- }
-
- assert.Equal(t, test.wantApiStats, &mock)
- assert.Equal(t, test.wantConfigs, len(d.configs))
- })
- }
-}
-
-func TestDiscovery_UpdateStatus(t *testing.T) {
-
-}
-
-func TestDiscovery_Run(t *testing.T) {
- tests := map[string]struct {
- wantApiStats *mockApi
- }{
- "default run": {
- wantApiStats: &mockApi{
- callsDynCfgEnable: 1,
- callsDyncCfgRegisterModule: 2,
- callsRegister: 10,
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- var mock mockApi
- d, err := NewDiscovery(Config{
- Plugin: "test",
- API: &mock,
- Functions: &mock,
- Modules: module.Registry{
- "module1": module.Creator{},
- "module2": module.Creator{},
- },
- ModuleConfigDefaults: nil,
- })
- require.Nil(t, err)
-
- testTime := time.Second * 3
- ctx, cancel := context.WithTimeout(context.Background(), testTime)
- defer cancel()
-
- in := make(chan<- []*confgroup.Group)
- done := make(chan struct{})
-
- go func() { defer close(done); d.Run(ctx, in) }()
-
- timeout := testTime + time.Second*2
- tk := time.NewTimer(timeout)
- defer tk.Stop()
-
- select {
- case <-done:
- assert.Equal(t, test.wantApiStats, &mock)
- case <-tk.C:
- t.Errorf("timed out after %s", timeout)
- }
- })
- }
-}
-
-type mockApi struct {
- callsDynCfgEnable int
- callsDyncCfgRegisterModule int
- callsDynCfgRegisterJob int
- callsDynCfgReportJobStatus int
- callsFunctionResultSuccess int
- callsFunctionResultReject int
-
- callsRegister int
-}
-
-func (m *mockApi) Register(string, func(functions.Function)) {
- m.callsRegister++
-}
-
-func (m *mockApi) DynCfgEnable(string) error {
- m.callsDynCfgEnable++
- return nil
-}
-
-func (m *mockApi) DynCfgReset() error {
- return nil
-}
-
-func (m *mockApi) DyncCfgRegisterModule(string) error {
- m.callsDyncCfgRegisterModule++
- return nil
-}
-
-func (m *mockApi) DynCfgRegisterJob(_, _, _ string) error {
- m.callsDynCfgRegisterJob++
- return nil
-}
-
-func (m *mockApi) DynCfgReportJobStatus(_, _, _, _ string) error {
- m.callsDynCfgReportJobStatus++
- return nil
-}
-
-func (m *mockApi) FunctionResultSuccess(_, _, _ string) error {
- m.callsFunctionResultSuccess++
- return nil
-}
-
-func (m *mockApi) FunctionResultReject(_, _, _ string) error {
- m.callsFunctionResultReject++
- return nil
-}
-
-func prepareConfig(values ...string) confgroup.Config {
- cfg := confgroup.Config{}
- for i := 1; i < len(values); i += 2 {
- cfg[values[i-1]] = values[i]
- }
- return cfg
-}
diff --git a/agent/discovery/dyncfg/ext.go b/agent/discovery/dyncfg/ext.go
deleted file mode 100644
index 910475c3d..000000000
--- a/agent/discovery/dyncfg/ext.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package dyncfg
-
-import (
- "errors"
- "os"
- "strings"
-
- "github.com/netdata/go.d.plugin/agent/confgroup"
-
- "gopkg.in/yaml.v2"
-)
-
-func (d *Discovery) Register(cfg confgroup.Config) {
- name := cfgJobName(cfg)
- if cfg.Provider() != dynCfg {
- // jobType handling in ND is not documented
- _ = d.API.DynCfgRegisterJob(cfg.Module(), name, "stock")
- }
-
- key := cfg.Module() + "_" + name
- d.addConfig(key, cfg)
-}
-
-func (d *Discovery) Unregister(cfg confgroup.Config) {
- key := cfg.Module() + "_" + cfgJobName(cfg)
- d.removeConfig(key)
-}
-
-func (d *Discovery) UpdateStatus(cfg confgroup.Config, status, payload string) {
- _ = d.API.DynCfgReportJobStatus(cfg.Module(), cfgJobName(cfg), status, payload)
-}
-
-func (d *Discovery) addConfig(name string, cfg confgroup.Config) {
- d.mux.Lock()
- defer d.mux.Unlock()
-
- d.configs[name] = cfg
-}
-
-func (d *Discovery) removeConfig(key string) {
- d.mux.Lock()
- defer d.mux.Unlock()
-
- delete(d.configs, key)
-}
-
-func (d *Discovery) getConfig(key string) (confgroup.Config, bool) {
- d.mux.Lock()
- defer d.mux.Unlock()
-
- v, ok := d.configs[key]
- return v, ok
-}
-
-func (d *Discovery) getConfigBytes(key string) ([]byte, error) {
- d.mux.Lock()
- defer d.mux.Unlock()
-
- cfg, ok := d.configs[key]
- if !ok {
- return nil, errors.New("config not found")
- }
-
- bs, err := yaml.Marshal(cfg)
- if err != nil {
- return nil, err
- }
-
- return bs, nil
-}
-
-var envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR")
-
-func isStock(cfg confgroup.Config) bool {
- if envNDStockConfigDir == "" {
- return false
- }
- return strings.HasPrefix(cfg.Source(), envNDStockConfigDir)
-}
diff --git a/agent/discovery/file/discovery.go b/agent/discovery/file/discovery.go
index 028644dd4..ec272a007 100644
--- a/agent/discovery/file/discovery.go
+++ b/agent/discovery/file/discovery.go
@@ -14,7 +14,8 @@ import (
)
var log = logger.New().With(
- slog.String("component", "discovery file"),
+ slog.String("component", "discovery"),
+ slog.String("discoverer", "file"),
)
func NewDiscovery(cfg Config) (*Discovery, error) {
diff --git a/agent/discovery/file/parse.go b/agent/discovery/file/parse.go
index b6ba52372..21b732fba 100644
--- a/agent/discovery/file/parse.go
+++ b/agent/discovery/file/parse.go
@@ -58,15 +58,18 @@ func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgro
if err := yaml.Unmarshal(bs, &modCfg); err != nil {
return nil, err
}
+
for _, cfg := range modCfg.Jobs {
cfg.SetModule(name)
def := mergeDef(modCfg.Default, modDef)
- cfg.Apply(def)
+ cfg.ApplyDefaults(def)
}
+
group := &confgroup.Group{
Configs: modCfg.Jobs,
Source: path,
}
+
return group, nil
}
@@ -79,7 +82,7 @@ func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.G
var i int
for _, cfg := range cfgs {
if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" {
- cfg.Apply(def)
+ cfg.ApplyDefaults(def)
cfgs[i] = cfg
i++
}
@@ -89,6 +92,7 @@ func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.G
Configs: cfgs[:i],
Source: path,
}
+
return group, nil
}
@@ -102,8 +106,8 @@ func cfgFormat(bs []byte) format {
}
type (
- static = map[interface{}]interface{}
- sd = []interface{}
+ static = map[any]any
+ sd = []any
)
switch data.(type) {
case static:
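
The aliases above make cfgFormat's sniffing explicit: after a generic yaml.Unmarshal, a top-level mapping means the static per-module format and a top-level sequence means the SD job-list format. A standalone rendering of that detection, assuming yaml.v2's default decoding of untyped documents:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func sniff(doc string) string {
	var v any
	if err := yaml.Unmarshal([]byte(doc), &v); err != nil {
		return "invalid"
	}
	switch v.(type) {
	case map[any]any: // yaml.v2 decodes mappings into map[interface{}]interface{}
		return "static"
	case []any:
		return "sd"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(sniff("jobs:\n  - name: web"))       // static
	fmt.Println(sniff("- name: web\n  module: web")) // sd
}
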
diff --git a/agent/discovery/file/parse_test.go b/agent/discovery/file/parse_test.go
index e18d43013..ce498cea3 100644
--- a/agent/discovery/file/parse_test.go
+++ b/agent/discovery/file/parse_test.go
@@ -18,377 +18,405 @@ func TestParse(t *testing.T) {
cfgDef = 22
modDef = 33
)
- tests := map[string]func(t *testing.T, tmp *tmpDir){
- "static, default: +job +conf +module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {
- UpdateEvery: modDef,
- AutoDetectionRetry: modDef,
- Priority: modDef,
- },
- }
- cfg := staticConfig{
- Default: confgroup.Default{
- UpdateEvery: cfgDef,
- AutoDetectionRetry: cfgDef,
- Priority: cfgDef,
- },
- Jobs: []confgroup.Config{
- {
- "name": "name",
- "update_every": jobDef,
- "autodetection_retry": jobDef,
- "priority": jobDef,
+ tests := map[string]struct {
+ test func(t *testing.T, tmp *tmpDir)
+ }{
+ "static, default: +job +conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
},
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": jobDef,
- "autodetection_retry": jobDef,
- "priority": jobDef,
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
},
- },
- }
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "static, default: +job +conf +module (merge all)": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {
- Priority: modDef,
- },
- }
- cfg := staticConfig{
- Default: confgroup.Default{
- AutoDetectionRetry: cfgDef,
- },
- Jobs: []confgroup.Config{
- {
- "name": "name",
- "update_every": jobDef,
+ "static, default: +job +conf +module (merge all)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ Priority: modDef,
},
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": jobDef,
- "autodetection_retry": cfgDef,
- "priority": modDef,
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ AutoDetectionRetry: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ "update_every": jobDef,
+ },
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": jobDef,
+ "autodetection_retry": cfgDef,
+ "priority": modDef,
+ },
},
- },
- }
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "static, default: -job +conf +module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {
- UpdateEvery: modDef,
- AutoDetectionRetry: modDef,
- Priority: modDef,
- },
- }
- cfg := staticConfig{
- Default: confgroup.Default{
- UpdateEvery: cfgDef,
- AutoDetectionRetry: cfgDef,
- Priority: cfgDef,
- },
- Jobs: []confgroup.Config{
- {
- "name": "name",
+ "static, default: -job +conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
},
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": cfgDef,
- "autodetection_retry": cfgDef,
- "priority": cfgDef,
+ }
+ cfg := staticConfig{
+ Default: confgroup.Default{
+ UpdateEvery: cfgDef,
+ AutoDetectionRetry: cfgDef,
+ Priority: cfgDef,
+ },
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
},
- },
- }
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": cfgDef,
+ "autodetection_retry": cfgDef,
+ "priority": cfgDef,
+ },
+ },
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "static, default: -job -conf +module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {
- UpdateEvery: modDef,
- AutoDetectionRetry: modDef,
- Priority: modDef,
- },
- }
- cfg := staticConfig{
- Jobs: []confgroup.Config{
- {
- "name": "name",
+ "static, default: -job -conf +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
},
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "autodetection_retry": modDef,
- "priority": modDef,
- "update_every": modDef,
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
},
- },
- }
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ "update_every": modDef,
+ },
+ },
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "static, default: -job -conf -module (+global)": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {},
- }
- cfg := staticConfig{
- Jobs: []confgroup.Config{
- {
- "name": "name",
+ "static, default: -job -conf -module (+global)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := staticConfig{
+ Jobs: []confgroup.Config{
+ {
+ "name": "name",
+ },
},
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "update_every": module.UpdateEvery,
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "update_every": module.UpdateEvery,
+ },
},
- },
- }
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "sd, default: +job +module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "sd_module": {
- UpdateEvery: modDef,
- AutoDetectionRetry: modDef,
- Priority: modDef,
- },
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "sd_module",
- "update_every": jobDef,
- "autodetection_retry": jobDef,
- "priority": jobDef,
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
+ "sd, default: +job +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
{
- "module": "sd_module",
"name": "name",
+ "module": "sd_module",
"update_every": jobDef,
"autodetection_retry": jobDef,
"priority": jobDef,
},
- },
- }
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "module": "sd_module",
+ "name": "name",
+ "update_every": jobDef,
+ "autodetection_retry": jobDef,
+ "priority": jobDef,
+ },
+ },
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "sd, default: -job +module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "sd_module": {
- UpdateEvery: modDef,
- AutoDetectionRetry: modDef,
- Priority: modDef,
- },
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "sd_module",
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
+ "sd, default: -job +module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {
+ UpdateEvery: modDef,
+ AutoDetectionRetry: modDef,
+ Priority: modDef,
+ },
+ }
+ cfg := sdConfig{
{
- "name": "name",
- "module": "sd_module",
- "update_every": modDef,
- "autodetection_retry": modDef,
- "priority": modDef,
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": modDef,
+ "autodetection_retry": modDef,
+ "priority": modDef,
+ },
},
- },
- }
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "sd, default: -job -module (+global)": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "sd_module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "sd_module",
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{
+ "sd, default: -job -module (+global)": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
{
- "name": "name",
- "module": "sd_module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
+ "name": "name",
+ "module": "sd_module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "sd_module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ },
},
- },
- }
+ }
- group, err := parse(reg, filename)
+ group, err := parse(reg, filename)
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "sd, job has no 'module' or 'module' is empty": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "sd_module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{},
- }
-
- group, err := parse(reg, filename)
-
- require.NoError(t, err)
- assert.Equal(t, expected, group)
- },
- "conf registry has no module": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "sd_module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- tmp.writeYAML(filename, cfg)
-
- expected := &confgroup.Group{
- Source: filename,
- Configs: []confgroup.Config{},
- }
-
- group, err := parse(reg, filename)
-
- require.NoError(t, err)
- assert.Equal(t, expected, group)
+ "sd, job has no 'module' or 'module' is empty": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
+
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
+
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
},
- "empty file": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{
- "module": {},
- }
+ "conf registry has no module": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "sd_module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ tmp.writeYAML(filename, cfg)
- filename := tmp.createFile("empty-*")
- group, err := parse(reg, filename)
+ expected := &confgroup.Group{
+ Source: filename,
+ Configs: []confgroup.Config{},
+ }
- assert.Nil(t, group)
- require.NoError(t, err)
+ group, err := parse(reg, filename)
+
+ require.NoError(t, err)
+ assert.Equal(t, expected, group)
+ },
+ },
+ "empty file": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+
+ filename := tmp.createFile("empty-*")
+ group, err := parse(reg, filename)
+
+ assert.Nil(t, group)
+ require.NoError(t, err)
+ },
},
- "only comments, unknown empty format": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{}
+ "only comments, unknown empty format": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
- filename := tmp.createFile("unknown-empty-format-*")
- tmp.writeString(filename, "# a comment")
- group, err := parse(reg, filename)
+ filename := tmp.createFile("unknown-empty-format-*")
+ tmp.writeString(filename, "# a comment")
+ group, err := parse(reg, filename)
- assert.Nil(t, group)
- assert.NoError(t, err)
+ assert.Nil(t, group)
+ assert.NoError(t, err)
+ },
},
- "unknown format": func(t *testing.T, tmp *tmpDir) {
- reg := confgroup.Registry{}
+ "unknown format": {
+ test: func(t *testing.T, tmp *tmpDir) {
+ reg := confgroup.Registry{}
- filename := tmp.createFile("unknown-format-*")
- tmp.writeYAML(filename, "unknown")
- group, err := parse(reg, filename)
+ filename := tmp.createFile("unknown-format-*")
+ tmp.writeYAML(filename, "unknown")
+ group, err := parse(reg, filename)
- assert.Nil(t, group)
- assert.Error(t, err)
+ assert.Nil(t, group)
+ assert.Error(t, err)
+ },
},
}
@@ -396,7 +424,8 @@ func TestParse(t *testing.T) {
t.Run(name, func(t *testing.T) {
tmp := newTmpDir(t, "parse-file-*")
defer tmp.cleanup()
- scenario(t, tmp)
+
+ scenario.test(t, tmp)
})
}
}
diff --git a/agent/discovery/file/read.go b/agent/discovery/file/read.go
index 3d27955ad..531b125d6 100644
--- a/agent/discovery/file/read.go
+++ b/agent/discovery/file/read.go
@@ -4,8 +4,10 @@ package file
import (
"context"
+ "fmt"
"os"
"path/filepath"
+ "strings"
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/logger"
@@ -71,19 +73,26 @@ func (r *Reader) groups() (groups []*confgroup.Group) {
r.Warningf("parse '%s': %v", path, err)
continue
}
+
if group == nil {
group = &confgroup.Group{Source: path}
+ } else {
+ for _, cfg := range group.Configs {
+ cfg.SetProvider("file reader")
+ cfg.SetSourceType(configSourceType(path))
+ cfg.SetSource(fmt.Sprintf("discoverer=file_reader,file=%s", path))
+ }
}
groups = append(groups, group)
}
}
- for _, group := range groups {
- for _, cfg := range group.Configs {
- cfg.SetSource(group.Source)
- cfg.SetProvider(r.Name())
- }
- }
-
return groups
}
+
+func configSourceType(path string) string {
+ if strings.Contains(path, "/etc/netdata") {
+ return "user"
+ }
+ return "stock"
+}
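
configSourceType classifies by substring alone, so any path under /etc/netdata is tagged user and everything else stock (a path that merely contains "/etc/netdata" elsewhere would also match). For example, from inside the package:

fmt.Println(configSourceType("/etc/netdata/go.d/nginx.conf"))          // user
fmt.Println(configSourceType("/usr/lib/netdata/conf.d/go.d/web.conf")) // stock
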
diff --git a/agent/discovery/file/read_test.go b/agent/discovery/file/read_test.go
index 2bfa20a77..bb71ef81a 100644
--- a/agent/discovery/file/read_test.go
+++ b/agent/discovery/file/read_test.go
@@ -3,6 +3,7 @@
package file
import (
+ "fmt"
"testing"
"github.com/netdata/go.d.plugin/agent/confgroup"
@@ -27,73 +28,89 @@ func TestNewReader(t *testing.T) {
}
for name, test := range tests {
- t.Run(name, func(t *testing.T) { assert.NotNil(t, NewReader(test.reg, test.paths)) })
+ t.Run(name, func(t *testing.T) {
+ assert.NotNil(t, NewReader(test.reg, test.paths))
+ })
}
}
func TestReader_Run(t *testing.T) {
- tmp := newTmpDir(t, "reader-run-*")
- defer tmp.cleanup()
+ tests := map[string]struct {
+ createSim func(tmp *tmpDir) discoverySim
+ }{
+ "read multiple files": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ module1 := tmp.join("module1.conf")
+ module2 := tmp.join("module2.conf")
+ module3 := tmp.join("module3.conf")
- module1 := tmp.join("module1.conf")
- module2 := tmp.join("module2.conf")
- module3 := tmp.join("module3.conf")
+ tmp.writeYAML(module1, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeYAML(module2, staticConfig{
+ Jobs: []confgroup.Config{{"name": "name"}},
+ })
+ tmp.writeString(module3, "# a comment")
- tmp.writeYAML(module1, staticConfig{
- Jobs: []confgroup.Config{{"name": "name"}},
- })
- tmp.writeYAML(module2, staticConfig{
- Jobs: []confgroup.Config{{"name": "name"}},
- })
- tmp.writeString(module3, "# a comment")
+ reg := confgroup.Registry{
+ "module1": {},
+ "module2": {},
+ "module3": {},
+ }
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Read: []string{module1, module2, module3},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: module1,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module1",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file reader",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_reader,file=%s", module1),
+ },
+ },
+ },
+ {
+ Source: module2,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module2",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file reader",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_reader,file=%s", module2),
+ },
+ },
+ },
+ {
+ Source: module3,
+ },
+ }
- reg := confgroup.Registry{
- "module1": {},
- "module2": {},
- "module3": {},
- }
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Read: []string{module1, module2, module3},
- })
- expected := []*confgroup.Group{
- {
- Source: module1,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module1",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": module1,
- "__provider__": "file reader",
- },
- },
- },
- {
- Source: module2,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module2",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": module2,
- "__provider__": "file reader",
- },
+ return discoverySim{
+ discovery: discovery,
+ expectedGroups: expected,
+ }
},
},
- {
- Source: module3,
- },
}
- sim := discoverySim{
- discovery: discovery,
- expectedGroups: expected,
- }
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ tmp := newTmpDir(t, "reader-run-*")
+ defer tmp.cleanup()
- sim.run(t)
+ test.createSim(tmp).run(t)
+ })
+ }
}
diff --git a/agent/discovery/file/watch.go b/agent/discovery/file/watch.go
index e33aac3ec..72eb9274e 100644
--- a/agent/discovery/file/watch.go
+++ b/agent/discovery/file/watch.go
@@ -4,6 +4,7 @@ package file
import (
"context"
+ "fmt"
"os"
"path/filepath"
"strings"
@@ -150,6 +151,11 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) {
} else if group == nil {
groups = append(groups, &confgroup.Group{Source: file})
} else {
+ for _, cfg := range group.Configs {
+ cfg.SetProvider("file watcher")
+ cfg.SetSourceType(configSourceType(file))
+ cfg.SetSource(fmt.Sprintf("discoverer=file_watcher,file=%s", file))
+ }
groups = append(groups, group)
}
}
@@ -162,14 +168,8 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) {
groups = append(groups, &confgroup.Group{Source: name})
}
- for _, group := range groups {
- for _, cfg := range group.Configs {
- cfg.SetSource(group.Source)
- cfg.SetProvider("file watcher")
- }
- }
-
send(ctx, in, groups)
+
w.watchDirs()
}
@@ -202,7 +202,6 @@ func (w *Watcher) stop() {
}
}()
- // in fact never returns an error
_ = w.watcher.Close()
}
diff --git a/agent/discovery/file/watch_test.go b/agent/discovery/file/watch_test.go
index 1450b7bb6..b8f8e5871 100644
--- a/agent/discovery/file/watch_test.go
+++ b/agent/discovery/file/watch_test.go
@@ -3,6 +3,7 @@
package file
import (
+ "fmt"
"testing"
"time"
@@ -28,325 +29,350 @@ func TestNewWatcher(t *testing.T) {
}
for name, test := range tests {
- t.Run(name, func(t *testing.T) { assert.NotNil(t, NewWatcher(test.reg, test.paths)) })
+ t.Run(name, func(t *testing.T) {
+ assert.NotNil(t, NewWatcher(test.reg, test.paths))
+ })
}
}
func TestWatcher_Run(t *testing.T) {
- tests := map[string]func(tmp *tmpDir) discoverySim{
- "file exists before start": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ tests := map[string]struct {
+ createSim func(tmp *tmpDir) discoverySim
+ }{
+ "file exists before start": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- }
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeYAML(filename, cfg)
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "empty file": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- },
- }
+ "empty file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeString(filename, "")
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "only comments, no data": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- },
- }
+ "only comments, no data": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ },
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeString(filename, "# a comment")
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeString(filename, "# a comment")
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "add file": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ "add file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- }
+ }
- sim := discoverySim{
- discovery: discovery,
- afterRun: func() {
- tmp.writeYAML(filename, cfg)
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ afterRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "remove file": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ "remove file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- {
- Source: filename,
- Configs: nil,
- },
- }
+ {
+ Source: filename,
+ Configs: nil,
+ },
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeYAML(filename, cfg)
- },
- afterRun: func() {
- tmp.removeFile(filename)
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ tmp.removeFile(filename)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "change file": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- cfgOrig := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- cfgChanged := sdConfig{
- {
- "name": "name_changed",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ "change file": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfgOrig := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ cfgChanged := sdConfig{
+ {
+ "name": "name_changed",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name_changed",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name_changed",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- }
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeYAML(filename, cfgOrig)
- },
- afterRun: func() {
- tmp.writeYAML(filename, cfgChanged)
- time.Sleep(time.Millisecond * 500)
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfgOrig)
+ },
+ afterRun: func() {
+ tmp.writeYAML(filename, cfgChanged)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
- "vim 'backupcopy=no' (writing to a file and backup)": func(tmp *tmpDir) discoverySim {
- reg := confgroup.Registry{
- "module": {},
- }
- cfg := sdConfig{
- {
- "name": "name",
- "module": "module",
- },
- }
- filename := tmp.join("module.conf")
- discovery := prepareDiscovery(t, Config{
- Registry: reg,
- Watch: []string{tmp.join("*.conf")},
- })
- expected := []*confgroup.Group{
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ "vim 'backupcopy=no' (writing to a file and backup)": {
+ createSim: func(tmp *tmpDir) discoverySim {
+ reg := confgroup.Registry{
+ "module": {},
+ }
+ cfg := sdConfig{
+ {
+ "name": "name",
+ "module": "module",
+ },
+ }
+ filename := tmp.join("module.conf")
+ discovery := prepareDiscovery(t, Config{
+ Registry: reg,
+ Watch: []string{tmp.join("*.conf")},
+ })
+ expected := []*confgroup.Group{
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": confgroup.TypeStock,
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- {
- Source: filename,
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "module",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- "__source__": filename,
- "__provider__": "file watcher",
+ {
+ Source: filename,
+ Configs: []confgroup.Config{
+ {
+ "name": "name",
+ "module": "module",
+ "update_every": module.UpdateEvery,
+ "autodetection_retry": module.AutoDetectionRetry,
+ "priority": module.Priority,
+ "__provider__": "file watcher",
+ "__source_type__": "stock",
+ "__source__": fmt.Sprintf("discoverer=file_watcher,file=%s", filename),
+ },
},
},
- },
- }
+ }
- sim := discoverySim{
- discovery: discovery,
- beforeRun: func() {
- tmp.writeYAML(filename, cfg)
- },
- afterRun: func() {
- newFilename := filename + ".swp"
- tmp.renameFile(filename, newFilename)
- tmp.writeYAML(filename, cfg)
- tmp.removeFile(newFilename)
- time.Sleep(time.Millisecond * 500)
- },
- expectedGroups: expected,
- }
- return sim
+ sim := discoverySim{
+ discovery: discovery,
+ beforeRun: func() {
+ tmp.writeYAML(filename, cfg)
+ },
+ afterRun: func() {
+ newFilename := filename + ".swp"
+ tmp.renameFile(filename, newFilename)
+ tmp.writeYAML(filename, cfg)
+ tmp.removeFile(newFilename)
+ time.Sleep(time.Millisecond * 500)
+ },
+ expectedGroups: expected,
+ }
+ return sim
+ },
},
}
- for name, createSim := range tests {
+ for name, test := range tests {
t.Run(name, func(t *testing.T) {
tmp := newTmpDir(t, "watch-run-*")
defer tmp.cleanup()
- createSim(tmp).run(t)
+ test.createSim(tmp).run(t)
})
}
}
diff --git a/agent/discovery/manager.go b/agent/discovery/manager.go
index 3ab1ab6af..8b698fd0b 100644
--- a/agent/discovery/manager.go
+++ b/agent/discovery/manager.go
@@ -13,6 +13,7 @@ import (
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/discovery/dummy"
"github.com/netdata/go.d.plugin/agent/discovery/file"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd"
"github.com/netdata/go.d.plugin/logger"
)
@@ -56,9 +57,9 @@ func (m *Manager) String() string {
return fmt.Sprintf("discovery manager: %v", m.discoverers)
}
-func (m *Manager) Add(d discoverer) {
- m.discoverers = append(m.discoverers, d)
-}
+//func (m *Manager) Add(d discoverer) {
+// m.discoverers = append(m.discoverers, d)
+//}
func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) {
m.Info("instance is started")
@@ -91,7 +92,7 @@ func (m *Manager) registerDiscoverers(cfg Config) error {
if err != nil {
return err
}
- m.Add(d)
+ m.discoverers = append(m.discoverers, d)
}
if len(cfg.Dummy.Names) > 0 {
@@ -100,7 +101,15 @@ func (m *Manager) registerDiscoverers(cfg Config) error {
if err != nil {
return err
}
- m.Add(d)
+ m.discoverers = append(m.discoverers, d)
+ }
+
+ if len(cfg.SD.ConfDir) != 0 {
+ d, err := sd.NewServiceDiscovery(cfg.SD)
+ if err != nil {
+ return err
+ }
+ m.discoverers = append(m.discoverers, d)
}
if len(m.discoverers) == 0 {
diff --git a/agent/discovery/manager_test.go b/agent/discovery/manager_test.go
index ebeba81a4..335ec3f0b 100644
--- a/agent/discovery/manager_test.go
+++ b/agent/discovery/manager_test.go
@@ -11,6 +11,7 @@ import (
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/discovery/file"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
diff --git a/agent/discovery/sd/conffile.go b/agent/discovery/sd/conffile.go
deleted file mode 100644
index 96b6f07cd..000000000
--- a/agent/discovery/sd/conffile.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package sd
-
-import (
- "context"
-
- "github.com/ilyam8/hashstructure"
-)
-
-type ConfigFileProvider interface {
- Run(ctx context.Context)
- Configs() chan ConfigFile
-}
-
-type ConfigFile struct {
- Source string
- Data []byte
-}
-
-func (c *ConfigFile) Hash() uint64 {
- h, _ := hashstructure.Hash(c, nil)
- return h
-}
diff --git a/agent/discovery/sd/kubernetes/config.go b/agent/discovery/sd/discoverer/kubernetes/config.go
similarity index 100%
rename from agent/discovery/sd/kubernetes/config.go
rename to agent/discovery/sd/discoverer/kubernetes/config.go
diff --git a/agent/discovery/sd/kubernetes/kubernetes.go b/agent/discovery/sd/discoverer/kubernetes/kubernetes.go
similarity index 98%
rename from agent/discovery/sd/kubernetes/kubernetes.go
rename to agent/discovery/sd/discoverer/kubernetes/kubernetes.go
index ba4f05851..7ac08b376 100644
--- a/agent/discovery/sd/kubernetes/kubernetes.go
+++ b/agent/discovery/sd/discoverer/kubernetes/kubernetes.go
@@ -30,7 +30,8 @@ const (
)
var log = logger.New().With(
- slog.String("component", "discovery sd k8s"),
+ slog.String("component", "service discovery"),
+ slog.String("discoverer", "kubernetes"),
)
func NewKubeDiscoverer(cfg Config) (*KubeDiscoverer, error) {
@@ -74,7 +75,7 @@ type KubeDiscoverer struct {
}
func (d *KubeDiscoverer) String() string {
- return "k8s td manager"
+ return "sd:k8s"
}
const resyncPeriod = 10 * time.Minute
diff --git a/agent/discovery/sd/kubernetes/kubernetes_test.go b/agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go
similarity index 100%
rename from agent/discovery/sd/kubernetes/kubernetes_test.go
rename to agent/discovery/sd/discoverer/kubernetes/kubernetes_test.go
diff --git a/agent/discovery/sd/kubernetes/pod.go b/agent/discovery/sd/discoverer/kubernetes/pod.go
similarity index 98%
rename from agent/discovery/sd/kubernetes/pod.go
rename to agent/discovery/sd/discoverer/kubernetes/pod.go
index a6391f7f6..d5c858cc1 100644
--- a/agent/discovery/sd/kubernetes/pod.go
+++ b/agent/discovery/sd/discoverer/kubernetes/pod.go
@@ -23,7 +23,7 @@ type podTargetGroup struct {
}
func (p podTargetGroup) Provider() string { return "sd:k8s:pod" }
-func (p podTargetGroup) Source() string { return fmt.Sprintf("%s(%s)", p.Provider(), p.source) }
+func (p podTargetGroup) Source() string { return p.source }
func (p podTargetGroup) Targets() []model.Target { return p.targets }
type PodTarget struct {
@@ -86,7 +86,7 @@ type podDiscoverer struct {
}
func (p *podDiscoverer) String() string {
- return "k8s pod"
+ return "sd:k8s:pod"
}
func (p *podDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
@@ -384,7 +384,7 @@ func podTUIDWithPort(pod *corev1.Pod, container corev1.Container, port corev1.Co
}
func podSourceFromNsName(namespace, name string) string {
- return namespace + "/" + name
+ return fmt.Sprintf("discoverer=k8s,kind=pod,namespace=%s,pod_name=%s", namespace, name)
}
func podSource(pod *corev1.Pod) string {
diff --git a/agent/discovery/sd/kubernetes/pod_test.go b/agent/discovery/sd/discoverer/kubernetes/pod_test.go
similarity index 99%
rename from agent/discovery/sd/kubernetes/pod_test.go
rename to agent/discovery/sd/discoverer/kubernetes/pod_test.go
index 87506243b..b759550fc 100644
--- a/agent/discovery/sd/kubernetes/pod_test.go
+++ b/agent/discovery/sd/discoverer/kubernetes/pod_test.go
@@ -44,8 +44,8 @@ func TestPodTargetGroup_Source(t *testing.T) {
}
},
wantSources: []string{
- "sd:k8s:pod(default/httpd-dd95c4d68-5bkwl)",
- "sd:k8s:pod(default/nginx-7cfd77469b-q6kxj)",
+ "discoverer=k8s,kind=pod,namespace=default,pod_name=httpd-dd95c4d68-5bkwl",
+ "discoverer=k8s,kind=pod,namespace=default,pod_name=nginx-7cfd77469b-q6kxj",
},
},
}
diff --git a/agent/discovery/sd/kubernetes/service.go b/agent/discovery/sd/discoverer/kubernetes/service.go
similarity index 96%
rename from agent/discovery/sd/kubernetes/service.go
rename to agent/discovery/sd/discoverer/kubernetes/service.go
index 975c5f84d..fbeeba8ce 100644
--- a/agent/discovery/sd/kubernetes/service.go
+++ b/agent/discovery/sd/discoverer/kubernetes/service.go
@@ -23,7 +23,7 @@ type serviceTargetGroup struct {
}
func (s serviceTargetGroup) Provider() string { return "sd:k8s:service" }
-func (s serviceTargetGroup) Source() string { return fmt.Sprintf("%s(%s)", s.Provider(), s.source) }
+func (s serviceTargetGroup) Source() string { return s.source }
func (s serviceTargetGroup) Targets() []model.Target { return s.targets }
type ServiceTarget struct {
@@ -193,7 +193,7 @@ func serviceTUID(svc *corev1.Service, port corev1.ServicePort) string {
}
func serviceSourceFromNsName(namespace, name string) string {
- return namespace + "/" + name
+ return fmt.Sprintf("discoverer=k8s,kind=service,namespace=%s,service_name=%s", namespace, name)
}
func serviceSource(svc *corev1.Service) string {
diff --git a/agent/discovery/sd/kubernetes/service_test.go b/agent/discovery/sd/discoverer/kubernetes/service_test.go
similarity index 98%
rename from agent/discovery/sd/kubernetes/service_test.go
rename to agent/discovery/sd/discoverer/kubernetes/service_test.go
index a62d66f09..cf2a6d62b 100644
--- a/agent/discovery/sd/kubernetes/service_test.go
+++ b/agent/discovery/sd/discoverer/kubernetes/service_test.go
@@ -43,8 +43,8 @@ func TestServiceTargetGroup_Source(t *testing.T) {
}
},
wantSources: []string{
- "sd:k8s:service(default/httpd-cluster-ip-service)",
- "sd:k8s:service(default/nginx-cluster-ip-service)",
+ "discoverer=k8s,kind=service,namespace=default,service_name=httpd-cluster-ip-service",
+ "discoverer=k8s,kind=service,namespace=default,service_name=nginx-cluster-ip-service",
},
},
}
diff --git a/agent/discovery/sd/kubernetes/sim_test.go b/agent/discovery/sd/discoverer/kubernetes/sim_test.go
similarity index 100%
rename from agent/discovery/sd/kubernetes/sim_test.go
rename to agent/discovery/sd/discoverer/kubernetes/sim_test.go
diff --git a/agent/discovery/sd/hostsocket/net.go b/agent/discovery/sd/discoverer/netlisteners/netlisteners.go
similarity index 64%
rename from agent/discovery/sd/hostsocket/net.go
rename to agent/discovery/sd/discoverer/netlisteners/netlisteners.go
index 18cc35b7d..7cf3f51d0 100644
--- a/agent/discovery/sd/hostsocket/net.go
+++ b/agent/discovery/sd/discoverer/netlisteners/netlisteners.go
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-package hostsocket
+package netlisteners
import (
"bufio"
@@ -16,53 +16,35 @@ import (
"time"
"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+ "github.com/netdata/go.d.plugin/agent/executable"
"github.com/netdata/go.d.plugin/logger"
"github.com/ilyam8/hashstructure"
)
-type netSocketTargetGroup struct {
- provider string
- source string
- targets []model.Target
-}
-
-func (g *netSocketTargetGroup) Provider() string { return g.provider }
-func (g *netSocketTargetGroup) Source() string { return g.source }
-func (g *netSocketTargetGroup) Targets() []model.Target { return g.targets }
-
-type NetSocketTarget struct {
- model.Base
-
- hash uint64
-
- Protocol string
- Address string
- Port string
- Comm string
- Cmdline string
-}
-
-func (t *NetSocketTarget) TUID() string { return t.tuid() }
-func (t *NetSocketTarget) Hash() uint64 { return t.hash }
-func (t *NetSocketTarget) tuid() string {
- return fmt.Sprintf("%s_%s_%d", strings.ToLower(t.Protocol), t.Port, t.hash)
-}
+var (
+ shortName = "net_listeners"
+ fullName = fmt.Sprintf("sd:%s", shortName)
+)
-func NewNetSocketDiscoverer(cfg NetworkSocketConfig) (*NetDiscoverer, error) {
+func NewDiscoverer(cfg Config) (*Discoverer, error) {
tags, err := model.ParseTags(cfg.Tags)
if err != nil {
return nil, fmt.Errorf("parse tags: %v", err)
}
dir := os.Getenv("NETDATA_PLUGINS_DIR")
+ if dir == "" {
+ dir = executable.Directory
+ }
if dir == "" {
dir, _ = os.Getwd()
}
- d := &NetDiscoverer{
+ d := &Discoverer{
Logger: logger.New().With(
- slog.String("component", "discovery sd hostsocket"),
+ slog.String("component", "service discovery"),
+ slog.String("discoverer", shortName),
),
interval: time.Second * 60,
ll: &localListenersExec{
@@ -75,8 +57,12 @@ func NewNetSocketDiscoverer(cfg NetworkSocketConfig) (*NetDiscoverer, error) {
return d, nil
}
+type Config struct {
+ Tags string
+}
+
type (
- NetDiscoverer struct {
+ Discoverer struct {
*logger.Logger
model.Base
@@ -88,7 +74,11 @@ type (
}
)
-func (d *NetDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
+func (d *Discoverer) String() string {
+ return fullName
+}
+
+func (d *Discoverer) Discover(ctx context.Context, in chan<- []model.TargetGroup) {
if err := d.discoverLocalListeners(ctx, in); err != nil {
d.Error(err)
return
@@ -110,7 +100,7 @@ func (d *NetDiscoverer) Discover(ctx context.Context, in chan<- []model.TargetGr
}
}
-func (d *NetDiscoverer) discoverLocalListeners(ctx context.Context, in chan<- []model.TargetGroup) error {
+func (d *Discoverer) discoverLocalListeners(ctx context.Context, in chan<- []model.TargetGroup) error {
bs, err := d.ll.discover(ctx)
if err != nil {
if errors.Is(err, context.Canceled) {
@@ -128,10 +118,11 @@ func (d *NetDiscoverer) discoverLocalListeners(ctx context.Context, in chan<- []
case <-ctx.Done():
case in <- tggs:
}
+
return nil
}
-func (d *NetDiscoverer) parseLocalListeners(bs []byte) ([]model.TargetGroup, error) {
+func (d *Discoverer) parseLocalListeners(bs []byte) ([]model.TargetGroup, error) {
var tgts []model.Target
sc := bufio.NewScanner(bytes.NewReader(bs))
@@ -147,7 +138,7 @@ func (d *NetDiscoverer) parseLocalListeners(bs []byte) ([]model.TargetGroup, err
return nil, fmt.Errorf("unexpected data: '%s'", text)
}
- tgt := NetSocketTarget{
+ tgt := target{
Protocol: parts[0],
Address: parts[1],
Port: parts[2],
@@ -166,9 +157,9 @@ func (d *NetDiscoverer) parseLocalListeners(bs []byte) ([]model.TargetGroup, err
tgts = append(tgts, &tgt)
}
- tgg := &netSocketTargetGroup{
- provider: "hostsocket",
- source: "net",
+ tgg := &targetGroup{
+ provider: fullName,
+ source: fmt.Sprintf("discoverer=%s,host=localhost", shortName),
targets: tgts,
}
@@ -184,11 +175,22 @@ func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) {
execCtx, cancel := context.WithTimeout(ctx, e.timeout)
defer cancel()
- cmd := exec.CommandContext(execCtx, e.binPath, "tcp") // TODO: tcp6?
+ // TCPv4 and UPDv4 sockets in LISTEN state
+ // https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/local_listeners.c
+ args := []string{
+ "no-udp6",
+ "no-tcp6",
+ "no-local",
+ "no-inbound",
+ "no-outbound",
+ "no-namespaces",
+ }
+
+ cmd := exec.CommandContext(execCtx, e.binPath, args...)
bs, err := cmd.Output()
if err != nil {
- return nil, fmt.Errorf("error on '%s': %v", cmd, err)
+ return nil, fmt.Errorf("error on executing '%s': %v", cmd, err)
}
return bs, nil
@@ -197,7 +199,7 @@ func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) {
func extractComm(s string) string {
i := strings.IndexByte(s, ' ')
if i <= 0 {
- return ""
+ return s
}
_, comm := filepath.Split(s[:i])
return comm
diff --git a/agent/discovery/sd/hostsocket/net_test.go b/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go
similarity index 82%
rename from agent/discovery/sd/hostsocket/net_test.go
rename to agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go
index 4ec860875..001f142a1 100644
--- a/agent/discovery/sd/hostsocket/net_test.go
+++ b/agent/discovery/sd/discoverer/netlisteners/netlisteners_test.go
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-package hostsocket
+package netlisteners
import (
"context"
@@ -19,37 +19,37 @@ UDP|127.0.0.1|53768|/opt/netdata/usr/libexec/netdata/plugins.d/go.d.plugin 1
`)
)
-func TestNetSocketDiscoverer_Discover(t *testing.T) {
+func TestDiscoverer_Discover(t *testing.T) {
tests := map[string]discoverySim{
"valid response": {
mock: &mockLocalListenersExec{},
wantDoneBeforeCancel: false,
- wantTargetGroups: []model.TargetGroup{&netSocketTargetGroup{
- provider: "hostsocket",
- source: "net",
+ wantTargetGroups: []model.TargetGroup{&targetGroup{
+ provider: "hostnetsocket",
+ source: "hostnetsocket",
targets: []model.Target{
- withHash(&NetSocketTarget{
+ withHash(&target{
Protocol: "UDP6",
Address: "::1",
Port: "8125",
Comm: "netdata",
Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
}),
- withHash(&NetSocketTarget{
+ withHash(&target{
Protocol: "TCP6",
Address: "::1",
Port: "8125",
Comm: "netdata",
Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
}),
- withHash(&NetSocketTarget{
+ withHash(&target{
Protocol: "TCP",
Address: "127.0.0.1",
Port: "8125",
Comm: "netdata",
Cmdline: "/opt/netdata/usr/sbin/netdata -P /run/netdata/netdata.pid -D",
}),
- withHash(&NetSocketTarget{
+ withHash(&target{
Protocol: "UDP",
Address: "127.0.0.1",
Port: "53768",
@@ -62,9 +62,9 @@ func TestNetSocketDiscoverer_Discover(t *testing.T) {
"empty response": {
mock: &mockLocalListenersExec{emptyResponse: true},
wantDoneBeforeCancel: false,
- wantTargetGroups: []model.TargetGroup{&netSocketTargetGroup{
- provider: "hostsocket",
- source: "net",
+ wantTargetGroups: []model.TargetGroup{&targetGroup{
+ provider: "hostnetsocket",
+ source: "hostnetsocket",
}},
},
"error on exec": {
@@ -86,9 +86,9 @@ func TestNetSocketDiscoverer_Discover(t *testing.T) {
}
}
-func withHash(l *NetSocketTarget) *NetSocketTarget {
+func withHash(l *target) *target {
l.hash, _ = calcHash(l)
- tags, _ := model.ParseTags("hostsocket net")
+ tags, _ := model.ParseTags("hostnetsocket")
l.Tags().Merge(tags)
return l
}
diff --git a/agent/discovery/sd/hostsocket/sim_test.go b/agent/discovery/sd/discoverer/netlisteners/sim_test.go
similarity index 89%
rename from agent/discovery/sd/hostsocket/sim_test.go
rename to agent/discovery/sd/discoverer/netlisteners/sim_test.go
index 998d9370c..2c4fe0260 100644
--- a/agent/discovery/sd/hostsocket/sim_test.go
+++ b/agent/discovery/sd/discoverer/netlisteners/sim_test.go
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-package hostsocket
+package netlisteners
import (
"context"
@@ -20,7 +20,7 @@ type discoverySim struct {
}
func (sim *discoverySim) run(t *testing.T) {
- d, err := NewNetSocketDiscoverer(NetworkSocketConfig{Tags: "hostsocket net"})
+ d, err := NewDiscoverer(Config{Tags: "hostnetsocket"})
require.NoError(t, err)
d.ll = sim.mock
@@ -45,7 +45,7 @@ func (sim *discoverySim) run(t *testing.T) {
}
}
-func (sim *discoverySim) collectTargetGroups(t *testing.T, ctx context.Context, d *NetDiscoverer) ([]model.TargetGroup, chan struct{}) {
+func (sim *discoverySim) collectTargetGroups(t *testing.T, ctx context.Context, d *Discoverer) ([]model.TargetGroup, chan struct{}) {
in := make(chan []model.TargetGroup)
done := make(chan struct{})
diff --git a/agent/discovery/sd/discoverer/netlisteners/target.go b/agent/discovery/sd/discoverer/netlisteners/target.go
new file mode 100644
index 000000000..80ca344f0
--- /dev/null
+++ b/agent/discovery/sd/discoverer/netlisteners/target.go
@@ -0,0 +1,39 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package netlisteners
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent/discovery/sd/model"
+)
+
+type targetGroup struct {
+ provider string
+ source string
+ targets []model.Target
+}
+
+func (g *targetGroup) Provider() string { return g.provider }
+func (g *targetGroup) Source() string { return g.source }
+func (g *targetGroup) Targets() []model.Target { return g.targets }
+
+type target struct {
+ model.Base
+
+ hash uint64
+
+ Protocol string
+ Address string
+ Port string
+ Comm string
+ Cmdline string
+}
+
+func (t *target) TUID() string { return tuid(t) }
+func (t *target) Hash() uint64 { return t.hash }
+
+func tuid(tgt *target) string {
+ return fmt.Sprintf("%s_%s_%d", strings.ToLower(tgt.Protocol), tgt.Port, tgt.hash)
+}
diff --git a/agent/discovery/sd/hostsocket/config.go b/agent/discovery/sd/hostsocket/config.go
deleted file mode 100644
index 8b47fc0d8..000000000
--- a/agent/discovery/sd/hostsocket/config.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package hostsocket
-
-type NetworkSocketConfig struct {
- Tags string
-}
diff --git a/agent/discovery/sd/pipeline/config.go b/agent/discovery/sd/pipeline/config.go
index faed30e36..027139987 100644
--- a/agent/discovery/sd/pipeline/config.go
+++ b/agent/discovery/sd/pipeline/config.go
@@ -5,25 +5,22 @@ package pipeline
import (
"errors"
"fmt"
- "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket"
- "github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/kubernetes"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/netlisteners"
)
type Config struct {
Name string `yaml:"name"`
- Discovery DiscoveryConfig `yaml:"discovery"`
+ Discovery DiscoveryConfig `yaml:"discover"`
Classify []ClassifyRuleConfig `yaml:"classify"`
- Compose []ComposeRuleConfig `yaml:"compose"` // TODO: "jobs"?
+ Compose []ComposeRuleConfig `yaml:"compose"`
}
type (
DiscoveryConfig struct {
- K8s []kubernetes.Config `yaml:"k8s"`
- HostSocket HostSocketConfig `yaml:"hostsocket"`
- }
- HostSocketConfig struct {
- Net *hostsocket.NetworkSocketConfig `yaml:"net"`
+ K8s []kubernetes.Config `yaml:"k8s"`
+ NetListeners *netlisteners.Config `yaml:"net_listeners"`
}
)
@@ -47,11 +44,11 @@ type ComposeRuleConfig struct {
}
func validateConfig(cfg Config) error {
- if cfg.Name != "" {
+ if cfg.Name == "" {
return errors.New("'name' not set")
}
- if len(cfg.Discovery.K8s) == 0 {
- return errors.New("'discovery->k8s' not set")
+ if len(cfg.Discovery.K8s) == 0 && cfg.Discovery.NetListeners == nil {
+ return errors.New("'discover' not set")
}
if err := validateClassifyConfig(cfg.Classify); err != nil {
return fmt.Errorf("tag rules: %v", err)
diff --git a/agent/discovery/sd/pipeline/funcmap.go b/agent/discovery/sd/pipeline/funcmap.go
index d49b0d3e3..3cf8de261 100644
--- a/agent/discovery/sd/pipeline/funcmap.go
+++ b/agent/discovery/sd/pipeline/funcmap.go
@@ -8,15 +8,19 @@ import (
"github.com/Masterminds/sprig/v3"
"github.com/bmatcuk/doublestar/v4"
+ "github.com/netdata/go.d.plugin/pkg/matcher"
)
func newFuncMap() template.FuncMap {
custom := map[string]interface{}{
- "glob": globAny,
- "re": regexpAny,
+ "match": funcMatchAny,
+ "glob": func(value, pattern string, patterns ...string) bool {
+ return funcMatchAny("glob", value, pattern, patterns...)
+ },
}
fm := sprig.HermeticTxtFuncMap()
+
for name, fn := range custom {
fm[name] = fn
}
@@ -24,30 +28,30 @@ func newFuncMap() template.FuncMap {
return fm
}
-func globAny(value, pattern string, rest ...string) bool {
- switch len(rest) {
+func funcMatchAny(typ, value, pattern string, patterns ...string) bool {
+ switch len(patterns) {
case 0:
- return globOnce(value, pattern)
+ return funcMatch(typ, value, pattern)
default:
- return globOnce(value, pattern) || globAny(value, rest[0], rest[1:]...)
+ return funcMatch(typ, value, pattern) || funcMatchAny(typ, value, patterns[0], patterns[1:]...)
}
}
-func regexpAny(value, pattern string, rest ...string) bool {
- switch len(rest) {
- case 0:
- return regexpOnce(value, pattern)
+func funcMatch(typ string, value, pattern string) bool {
+ switch typ {
+ case "glob", "":
+ m, err := matcher.NewGlobMatcher(pattern)
+ return err == nil && m.MatchString(value)
+ case "sp":
+ m, err := matcher.NewSimplePatternsMatcher(pattern)
+ return err == nil && m.MatchString(value)
+ case "re":
+ ok, err := regexp.MatchString(pattern, value)
+ return err == nil && ok
+ case "dstar":
+ ok, err := doublestar.Match(pattern, value)
+ return err == nil && ok
default:
- return regexpOnce(value, pattern) || regexpAny(value, rest[0], rest[1:]...)
+ return false
}
}
-
-func globOnce(value, pattern string) bool {
- ok, err := doublestar.Match(pattern, value)
- return err == nil && ok
-}
-
-func regexpOnce(value, pattern string) bool {
- ok, err := regexp.MatchString(pattern, value)
- return err == nil && ok
-}
diff --git a/agent/discovery/sd/pipeline/funcmap_test.go b/agent/discovery/sd/pipeline/funcmap_test.go
index c8ced5170..3de71ef70 100644
--- a/agent/discovery/sd/pipeline/funcmap_test.go
+++ b/agent/discovery/sd/pipeline/funcmap_test.go
@@ -3,93 +3,79 @@
package pipeline
import (
- "fmt"
"testing"
"github.com/stretchr/testify/assert"
)
-func Test_globAny(t *testing.T) {
+func Test_funcMatchAny(t *testing.T) {
tests := map[string]struct {
+ typ string
patterns []string
value string
wantMatch bool
}{
- "one param, matches": {
+ "dstar: one param, matches": {
wantMatch: true,
+ typ: "dstar",
patterns: []string{"*"},
value: "value",
},
- "one param, matches with *": {
+ "dstar: one param, matches with *": {
wantMatch: true,
+ typ: "dstar",
patterns: []string{"**/value"},
value: "/one/two/three/value",
},
- "one param, not matches": {
+ "dstar: one param, not matches": {
wantMatch: false,
+ typ: "dstar",
patterns: []string{"Value"},
value: "value",
},
- "several params, last one matches": {
+ "dstar: several params, last one matches": {
wantMatch: true,
+ typ: "dstar",
patterns: []string{"not", "matches", "*"},
value: "value",
},
- "several params, no matches": {
+ "dstar: several params, no matches": {
wantMatch: false,
+ typ: "dstar",
patterns: []string{"not", "matches", "really"},
value: "value",
},
- }
-
- for name, test := range tests {
- name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value)
- ok := globAny(test.value, test.patterns[0], test.patterns[1:]...)
-
- if test.wantMatch {
- assert.Truef(t, ok, name)
- } else {
- assert.Falsef(t, ok, name)
- }
- }
-}
-
-func Test_regexpAny(t *testing.T) {
- tests := map[string]struct {
- patterns []string
- value string
- wantMatch bool
- }{
- "one param, matches": {
+ "re: one param, matches": {
wantMatch: true,
+ typ: "re",
patterns: []string{"^value$"},
value: "value",
},
- "one param, not matches": {
+ "re: one param, not matches": {
wantMatch: false,
+ typ: "re",
patterns: []string{"^Value$"},
value: "value",
},
- "several params, last one matches": {
+ "re: several params, last one matches": {
wantMatch: true,
+ typ: "re",
patterns: []string{"not", "matches", "va[lue]{3}"},
value: "value",
},
- "several params, no matches": {
+ "re: several params, no matches": {
wantMatch: false,
+ typ: "re",
patterns: []string{"not", "matches", "val[^l]ue"},
value: "value",
},
}
for name, test := range tests {
- name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value)
- ok := regexpAny(test.value, test.patterns[0], test.patterns[1:]...)
+ t.Run(name, func(t *testing.T) {
+ ok := funcMatchAny(test.typ, test.value, test.patterns[0], test.patterns[1:]...)
- if test.wantMatch {
- assert.Truef(t, ok, name)
- } else {
- assert.Falsef(t, ok, name)
- }
+ assert.Equal(t, test.wantMatch, ok)
+ })
}
}
diff --git a/agent/discovery/sd/pipeline/pipeline.go b/agent/discovery/sd/pipeline/pipeline.go
index 1a1eb69f9..0ec4674c7 100644
--- a/agent/discovery/sd/pipeline/pipeline.go
+++ b/agent/discovery/sd/pipeline/pipeline.go
@@ -7,9 +7,10 @@ import (
"log/slog"
"time"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/kubernetes"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd/discoverer/netlisteners"
+
"github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket"
- "github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes"
"github.com/netdata/go.d.plugin/agent/discovery/sd/model"
"github.com/netdata/go.d.plugin/logger"
)
@@ -19,14 +20,28 @@ func New(cfg Config) (*Pipeline, error) {
return nil, err
}
+ clr, err := newTargetClassificator(cfg.Classify)
+ if err != nil {
+ return nil, err
+ }
+
+ cmr, err := newConfigComposer(cfg.Compose)
+ if err != nil {
+ return nil, err
+ }
+
p := &Pipeline{
Logger: logger.New().With(
- slog.String("component", "discovery sd pipeline"),
+ slog.String("component", "service discovery"),
+ slog.String("pipeline", cfg.Name),
),
+ clr: clr,
+ cmr: cmr,
accum: newAccumulator(),
discoverers: make([]model.Discoverer, 0),
items: make(map[string]map[uint64][]confgroup.Config),
}
+ p.accum.Logger = p.Logger
if err := p.registerDiscoverers(cfg); err != nil {
return nil, err
@@ -63,8 +78,8 @@ func (p *Pipeline) registerDiscoverers(conf Config) error {
}
p.discoverers = append(p.discoverers, td)
}
- if conf.Discovery.HostSocket.Net != nil {
- td, err := hostsocket.NewNetSocketDiscoverer(*conf.Discovery.HostSocket.Net)
+ if conf.Discovery.NetListeners != nil {
+ td, err := netlisteners.NewDiscoverer(*conf.Discovery.NetListeners)
if err != nil {
return err
}
@@ -103,15 +118,15 @@ func (p *Pipeline) Run(ctx context.Context, in chan<- []*confgroup.Group) {
}
func (p *Pipeline) processGroups(tggs []model.TargetGroup) []*confgroup.Group {
- var confGroups []*confgroup.Group
+ var groups []*confgroup.Group
// updates come from the accumulator, this ensures that all groups have different sources
for _, tgg := range tggs {
p.Infof("processing group '%s' with %d target(s)", tgg.Source(), len(tgg.Targets()))
if v := p.processGroup(tgg); v != nil {
- confGroups = append(confGroups, v)
+ groups = append(groups, v)
}
}
- return confGroups
+ return groups
}
func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
@@ -120,6 +135,7 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
return nil
}
delete(p.items, tgg.Source())
+
return &confgroup.Group{Source: tgg.Source()}
}
@@ -149,14 +165,16 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
if configs := p.cmr.compose(tgt); len(configs) > 0 {
for _, cfg := range configs {
+ // TODO: set
cfg.SetProvider(tgg.Provider())
cfg.SetSource(tgg.Source())
+ cfg.SetSourceType(confgroup.TypeDiscovered)
}
targetsCache[hash] = configs
changed = true
}
} else {
- p.Infof("target '%s' classify: fail", tgt.TUID())
+ targetsCache[hash] = nil
}
}
@@ -176,6 +194,7 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group {
// TODO: deepcopy?
cfgGroup := &confgroup.Group{Source: tgg.Source()}
+
for _, cfgs := range targetsCache {
cfgGroup.Configs = append(cfgGroup.Configs, cfgs...)
}
diff --git a/agent/discovery/sd/pipeline/pipeline_test.go b/agent/discovery/sd/pipeline/pipeline_test.go
index ae6c5991a..fbe1028d1 100644
--- a/agent/discovery/sd/pipeline/pipeline_test.go
+++ b/agent/discovery/sd/pipeline/pipeline_test.go
@@ -89,18 +89,7 @@ compose:
wantClassifyCalls: 2,
wantComposeCalls: 2,
wantConfGroups: []*confgroup.Group{
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- }},
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
},
},
"existing group with same targets": {
@@ -116,21 +105,10 @@ compose:
wantClassifyCalls: 2,
wantComposeCalls: 2,
wantConfGroups: []*confgroup.Group{
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- }},
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
},
},
- "existing empty group that previously had targets": {
+ "existing group that previously had targets with no targets": {
config: config,
discoverers: []model.Discoverer{
newMockDiscoverer("rule1",
@@ -143,19 +121,8 @@ compose:
wantClassifyCalls: 2,
wantComposeCalls: 2,
wantConfGroups: []*confgroup.Group{
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- }},
- {Source: "test", Configs: nil},
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup(),
},
},
"existing group with old and new targets": {
@@ -171,40 +138,8 @@ compose:
wantClassifyCalls: 4,
wantComposeCalls: 4,
wantConfGroups: []*confgroup.Group{
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- }},
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock11-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock22-foobar2",
- },
- }},
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2", "mock11-foobar1", "mock22-foobar2"),
},
},
"existing group with new targets only": {
@@ -220,30 +155,8 @@ compose:
wantClassifyCalls: 4,
wantComposeCalls: 4,
wantConfGroups: []*confgroup.Group{
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock1-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock2-foobar2",
- },
- }},
- {Source: "test", Configs: []confgroup.Config{
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock11-foobar1",
- },
- {
- "__provider__": "mock",
- "__source__": "test",
- "name": "mock22-foobar2",
- },
- }},
+ prepareDiscoveredGroup("mock1-foobar1", "mock2-foobar2"),
+ prepareDiscoveredGroup("mock11-foobar1", "mock22-foobar2"),
},
},
}
@@ -255,6 +168,23 @@ compose:
}
}
+func prepareDiscoveredGroup(configNames ...string) *confgroup.Group {
+ var configs []confgroup.Config
+
+ for _, name := range configNames {
+ configs = append(configs, confgroup.Config{}.
+ SetProvider("mock").
+ SetSourceType(confgroup.TypeDiscovered).
+ SetSource("test").
+ SetName(name))
+ }
+
+ return &confgroup.Group{
+ Source: "test",
+ Configs: configs,
+ }
+}
+
func newMockDiscoverer(tags string, tggs ...model.TargetGroup) *mockDiscoverer {
return &mockDiscoverer{
tags: mustParseTags(tags),
diff --git a/agent/discovery/sd/pipeline/qq.yaml b/agent/discovery/sd/pipeline/qq.yaml
deleted file mode 100644
index e2ed5e402..000000000
--- a/agent/discovery/sd/pipeline/qq.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
-name: qqq
-discovery:
- k8s:
- - pod:
- tags: "pod"
- local_mode: yes
- service:
- tags: "service"
- hostsocket:
- net:
- tags: "netsocket"
- unix:
- tags: "unixsocket"
- docker:
- - address: "1"
- tags: "qq"
-
-
-classify:
- - name: "name"
- selector: "k8s"
- tags: "apps"
- match:
- - tags: "apache"
- expr: '{{ and (eq .Port "8161") (glob .Image "**/activemq*") }}'
-
-compose:
- - name: "Applications"
- selector: "apps"
- config:
- - selector: "apache"
- template: |
- module: bind
- name: bind-{{.TUID}}
diff --git a/agent/discovery/sd/sd.go b/agent/discovery/sd/sd.go
index 7897a659d..a47ea92e7 100644
--- a/agent/discovery/sd/sd.go
+++ b/agent/discovery/sd/sd.go
@@ -4,81 +4,99 @@ package sd
import (
"context"
+ "log/slog"
+ "os"
"sync"
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline"
"github.com/netdata/go.d.plugin/logger"
+ "github.com/netdata/go.d.plugin/pkg/multipath"
"gopkg.in/yaml.v2"
)
-func NewServiceDiscovery() (*ServiceDiscovery, error) {
- return nil, nil
+type Config struct {
+ ConfDir multipath.MultiPath
+}
+
+func NewServiceDiscovery(cfg Config) (*ServiceDiscovery, error) {
+ d := &ServiceDiscovery{
+ Logger: logger.New().With(
+ slog.String("component", "service discovery"),
+ ),
+ ConfDir: cfg.ConfDir,
+
+ newPipeline: func(config pipeline.Config) (sdPipeline, error) {
+ return pipeline.New(config)
+ },
+ pipelines: make(map[string]func()),
+ }
+
+ return d, nil
}
type (
ServiceDiscovery struct {
*logger.Logger
+ ConfDir multipath.MultiPath
- confProv ConfigFileProvider
- sdFactory sdPipelineFactory
+ newPipeline func(config pipeline.Config) (sdPipeline, error)
- confCache map[string]uint64
pipelines map[string]func()
}
sdPipeline interface {
Run(ctx context.Context, in chan<- []*confgroup.Group)
}
- sdPipelineFactory interface {
- create(config pipeline.Config) (sdPipeline, error)
- }
)
func (d *ServiceDiscovery) Run(ctx context.Context, in chan<- []*confgroup.Group) {
d.Info("instance is started")
- defer d.Info("instance is stopped")
- defer d.cleanup()
+ defer func() { d.cleanup(); d.Info("instance is stopped") }()
- var wg sync.WaitGroup
+ files, err := d.ConfDir.FindFiles(".yaml")
+ if err != nil {
+ d.Error(err)
+ return
+ }
- wg.Add(1)
- go func() { defer wg.Done(); d.confProv.Run(ctx) }()
-
- for {
- select {
- case <-ctx.Done():
- return
- case cf := <-d.confProv.Configs():
- if cf.Source == "" {
- continue
- }
- if len(cf.Data) == 0 {
- delete(d.confCache, cf.Source)
- d.removePipeline(cf)
- } else if hash, ok := d.confCache[cf.Source]; !ok || hash != cf.Hash() {
- d.confCache[cf.Source] = cf.Hash()
- d.addPipeline(ctx, cf, in)
- }
- }
+ for _, filename := range files {
+ d.addPipeline(ctx, filename, in)
}
+
+ if len(d.pipelines) == 0 {
+ return
+ }
+
+ <-ctx.Done()
}
-func (d *ServiceDiscovery) addPipeline(ctx context.Context, cf ConfigFile, in chan<- []*confgroup.Group) {
+func (d *ServiceDiscovery) addPipeline(ctx context.Context, file string, in chan<- []*confgroup.Group) {
+ bs, err := os.ReadFile(file)
+ if err != nil {
+ d.Error(err)
+ return
+ }
+
var cfg pipeline.Config
- if err := yaml.Unmarshal(cf.Data, &cfg); err != nil {
+ if err := yaml.Unmarshal(bs, &cfg); err != nil {
d.Error(err)
return
}
- pl, err := d.sdFactory.create(cfg)
+ if cfg.Name == "" {
+ d.Warningf("pipeline config '%s' has no name, skipping it", file)
+ return
+ }
+
+ pl, err := d.newPipeline(cfg)
if err != nil {
d.Error(err)
return
}
- if stop, ok := d.pipelines[cf.Source]; ok {
+ if stop, ok := d.pipelines[cfg.Name]; ok {
stop()
}
@@ -87,16 +105,9 @@ func (d *ServiceDiscovery) addPipeline(ctx context.Context, cf ConfigFile, in ch
wg.Add(1)
go func() { defer wg.Done(); pl.Run(plCtx, in) }()
- stop := func() { cancel(); wg.Wait() }
-
- d.pipelines[cf.Source] = stop
-}
-func (d *ServiceDiscovery) removePipeline(cf ConfigFile) {
- if stop, ok := d.pipelines[cf.Source]; ok {
- delete(d.pipelines, cf.Source)
- stop()
- }
+ stop := func() { cancel(); wg.Wait() }
+ d.pipelines[cfg.Name] = stop
}
func (d *ServiceDiscovery) cleanup() {
diff --git a/agent/discovery/sd/sd_test.go b/agent/discovery/sd/sd_test.go
index b67921e96..72616fa51 100644
--- a/agent/discovery/sd/sd_test.go
+++ b/agent/discovery/sd/sd_test.go
@@ -2,88 +2,89 @@
package sd
-import (
- "testing"
-
- "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline"
-
- "gopkg.in/yaml.v2"
-)
-
-func TestServiceDiscovery_Run(t *testing.T) {
- tests := map[string]discoverySim{
- "add pipeline": {
- configs: []ConfigFile{
- prepareConfigFile("source", "name"),
- },
- wantPipelines: []*mockPipeline{
- {name: "name", started: true, stopped: false},
- },
- },
- "remove pipeline": {
- configs: []ConfigFile{
- prepareConfigFile("source", "name"),
- prepareEmptyConfigFile("source"),
- },
- wantPipelines: []*mockPipeline{
- {name: "name", started: true, stopped: true},
- },
- },
- "re-add pipeline multiple times": {
- configs: []ConfigFile{
- prepareConfigFile("source", "name"),
- prepareConfigFile("source", "name"),
- prepareConfigFile("source", "name"),
- },
- wantPipelines: []*mockPipeline{
- {name: "name", started: true, stopped: false},
- },
- },
- "restart pipeline": {
- configs: []ConfigFile{
- prepareConfigFile("source", "name1"),
- prepareConfigFile("source", "name2"),
- },
- wantPipelines: []*mockPipeline{
- {name: "name1", started: true, stopped: true},
- {name: "name2", started: true, stopped: false},
- },
- },
- "invalid pipeline config": {
- configs: []ConfigFile{
- prepareConfigFile("source", "invalid"),
- },
- wantPipelines: nil,
- },
- "invalid config for running pipeline": {
- configs: []ConfigFile{
- prepareConfigFile("source", "name"),
- prepareConfigFile("source", "invalid"),
- },
- wantPipelines: []*mockPipeline{
- {name: "name", started: true, stopped: false},
- },
- },
- }
-
- for name, sim := range tests {
- t.Run(name, func(t *testing.T) {
- sim.run(t)
- })
- }
-}
-
-func prepareConfigFile(source, name string) ConfigFile {
- bs, _ := yaml.Marshal(pipeline.Config{Name: name})
-
- return ConfigFile{
- Source: source,
- Data: bs,
- }
-}
-
-func prepareEmptyConfigFile(source string) ConfigFile {
- return ConfigFile{
- Source: source,
- }
-}
+//
+//import (
+// "testing"
+//
+// "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline"
+//
+// "gopkg.in/yaml.v2"
+//)
+//
+//func TestServiceDiscovery_Run(t *testing.T) {
+// tests := map[string]discoverySim{
+// "add pipeline": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "name"),
+// },
+// wantPipelines: []*mockPipeline{
+// {name: "name", started: true, stopped: false},
+// },
+// },
+// "remove pipeline": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "name"),
+// prepareEmptyConfigFile("source"),
+// },
+// wantPipelines: []*mockPipeline{
+// {name: "name", started: true, stopped: true},
+// },
+// },
+// "re-add pipeline multiple times": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "name"),
+// prepareConfigFile("source", "name"),
+// prepareConfigFile("source", "name"),
+// },
+// wantPipelines: []*mockPipeline{
+// {name: "name", started: true, stopped: false},
+// },
+// },
+// "restart pipeline": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "name1"),
+// prepareConfigFile("source", "name2"),
+// },
+// wantPipelines: []*mockPipeline{
+// {name: "name1", started: true, stopped: true},
+// {name: "name2", started: true, stopped: false},
+// },
+// },
+// "invalid pipeline config": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "invalid"),
+// },
+// wantPipelines: nil,
+// },
+// "invalid config for running pipeline": {
+// configs: []ConfigFile{
+// prepareConfigFile("source", "name"),
+// prepareConfigFile("source", "invalid"),
+// },
+// wantPipelines: []*mockPipeline{
+// {name: "name", started: true, stopped: false},
+// },
+// },
+// }
+//
+// for name, sim := range tests {
+// t.Run(name, func(t *testing.T) {
+// sim.run(t)
+// })
+// }
+//}
+//
+//func prepareConfigFile(source, name string) ConfigFile {
+// bs, _ := yaml.Marshal(pipeline.Config{Name: name})
+//
+// return ConfigFile{
+// Source: source,
+// Data: bs,
+// }
+//}
+//
+//func prepareEmptyConfigFile(source string) ConfigFile {
+// return ConfigFile{
+// Source: source,
+// }
+//}
diff --git a/agent/discovery/sd/sim_test.go b/agent/discovery/sd/sim_test.go
index 9ddb15e50..ada1d15d3 100644
--- a/agent/discovery/sd/sim_test.go
+++ b/agent/discovery/sd/sim_test.go
@@ -2,116 +2,117 @@
package sd
-import (
- "context"
- "errors"
- "sync"
- "testing"
- "time"
-
- "github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline"
- "github.com/netdata/go.d.plugin/logger"
-
- "github.com/stretchr/testify/assert"
-)
-
-var lock = &sync.Mutex{}
-
-type discoverySim struct {
- configs []ConfigFile
- wantPipelines []*mockPipeline
-}
-
-func (sim *discoverySim) run(t *testing.T) {
- fact := &mockFactory{}
- mgr := &ServiceDiscovery{
- Logger: logger.New(),
- sdFactory: fact,
- confProv: &mockConfigProvider{
- configs: sim.configs,
- ch: make(chan ConfigFile),
- },
- confCache: make(map[string]uint64),
- pipelines: make(map[string]func()),
- }
-
- in := make(chan<- []*confgroup.Group)
- done := make(chan struct{})
- ctx, cancel := context.WithCancel(context.Background())
-
- go func() { defer close(done); mgr.Run(ctx, in) }()
-
- time.Sleep(time.Second * 3)
-
- lock.Lock()
- assert.Equalf(t, sim.wantPipelines, fact.pipelines, "before stop")
- lock.Unlock()
-
- cancel()
-
- timeout := time.Second * 5
-
- select {
- case <-done:
- lock.Lock()
- for _, pl := range fact.pipelines {
- assert.Truef(t, pl.stopped, "pipeline '%s' is not stopped after cancel()", pl.name)
- }
- lock.Unlock()
- case <-time.After(timeout):
- t.Errorf("sd failed to exit in %s", timeout)
- }
-}
-
-type mockConfigProvider struct {
- configs []ConfigFile
- ch chan ConfigFile
-}
-
-func (m *mockConfigProvider) Run(ctx context.Context) {
- for _, conf := range m.configs {
- select {
- case <-ctx.Done():
- return
- case m.ch <- conf:
- }
- }
- <-ctx.Done()
-}
-
-func (m *mockConfigProvider) Configs() chan ConfigFile {
- return m.ch
-}
-
-type mockFactory struct {
- pipelines []*mockPipeline
-}
-
-func (m *mockFactory) create(cfg pipeline.Config) (sdPipeline, error) {
- lock.Lock()
- defer lock.Unlock()
-
- if cfg.Name == "invalid" {
- return nil, errors.New("mock sdPipelineFactory.create() error")
- }
-
- pl := mockPipeline{name: cfg.Name}
- m.pipelines = append(m.pipelines, &pl)
-
- return &pl, nil
-}
-
-type mockPipeline struct {
- name string
- started bool
- stopped bool
-}
-
-func (m *mockPipeline) Run(ctx context.Context, _ chan<- []*confgroup.Group) {
- lock.Lock()
- m.started = true
- lock.Unlock()
- defer func() { lock.Lock(); m.stopped = true; lock.Unlock() }()
- <-ctx.Done()
-}
+//
+//import (
+// "context"
+// "errors"
+// "sync"
+// "testing"
+// "time"
+//
+// "github.com/netdata/go.d.plugin/agent/confgroup"
+// "github.com/netdata/go.d.plugin/agent/discovery/sd/pipeline"
+// "github.com/netdata/go.d.plugin/logger"
+//
+// "github.com/stretchr/testify/assert"
+//)
+//
+//var lock = &sync.Mutex{}
+//
+//type discoverySim struct {
+// configs []ConfigFile
+// wantPipelines []*mockPipeline
+//}
+//
+//func (sim *discoverySim) run(t *testing.T) {
+// fact := &mockFactory{}
+// mgr := &ServiceDiscovery{
+// Logger: logger.New(),
+// sdFactory: fact,
+// confProv: &mockConfigProvider{
+// configs: sim.configs,
+// ch: make(chan ConfigFile),
+// },
+// confCache: make(map[string]uint64),
+// pipelines: make(map[string]func()),
+// }
+//
+// in := make(chan<- []*confgroup.Group)
+// done := make(chan struct{})
+// ctx, cancel := context.WithCancel(context.Background())
+//
+// go func() { defer close(done); mgr.Run(ctx, in) }()
+//
+// time.Sleep(time.Second * 3)
+//
+// lock.Lock()
+// assert.Equalf(t, sim.wantPipelines, fact.pipelines, "before stop")
+// lock.Unlock()
+//
+// cancel()
+//
+// timeout := time.Second * 5
+//
+// select {
+// case <-done:
+// lock.Lock()
+// for _, pl := range fact.pipelines {
+// assert.Truef(t, pl.stopped, "pipeline '%s' is not stopped after cancel()", pl.name)
+// }
+// lock.Unlock()
+// case <-time.After(timeout):
+// t.Errorf("sd failed to exit in %s", timeout)
+// }
+//}
+//
+//type mockConfigProvider struct {
+// configs []ConfigFile
+// ch chan ConfigFile
+//}
+//
+//func (m *mockConfigProvider) Run(ctx context.Context) {
+// for _, conf := range m.configs {
+// select {
+// case <-ctx.Done():
+// return
+// case m.ch <- conf:
+// }
+// }
+// <-ctx.Done()
+//}
+//
+//func (m *mockConfigProvider) Configs() chan ConfigFile {
+// return m.ch
+//}
+//
+//type mockFactory struct {
+// pipelines []*mockPipeline
+//}
+//
+//func (m *mockFactory) create(cfg pipeline.Config) (sdPipeline, error) {
+// lock.Lock()
+// defer lock.Unlock()
+//
+// if cfg.Name == "invalid" {
+// return nil, errors.New("mock sdPipelineFactory.create() error")
+// }
+//
+// pl := mockPipeline{name: cfg.Name}
+// m.pipelines = append(m.pipelines, &pl)
+//
+// return &pl, nil
+//}
+//
+//type mockPipeline struct {
+// name string
+// started bool
+// stopped bool
+//}
+//
+//func (m *mockPipeline) Run(ctx context.Context, _ chan<- []*confgroup.Group) {
+// lock.Lock()
+// m.started = true
+// lock.Unlock()
+// defer func() { lock.Lock(); m.stopped = true; lock.Unlock() }()
+// <-ctx.Done()
+//}
diff --git a/agent/executable/executable.go b/agent/executable/executable.go
index 3aead7943..cb09db1eb 100644
--- a/agent/executable/executable.go
+++ b/agent/executable/executable.go
@@ -22,7 +22,10 @@ func init() {
_, Name = filepath.Split(path)
Name = strings.TrimSuffix(Name, ".plugin")
- Name = strings.TrimSuffix(Name, ".test")
+
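+ // go test binaries are named "<package>.test"; normalize to a fixed name so behavior under tests is predictable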
+ if strings.HasSuffix(Name, ".test") {
+ Name = "test"
+ }
// FIXME: can't use logger because of circular import
fi, err := os.Lstat(path)
diff --git a/agent/functions/function.go b/agent/functions/function.go
index 46a728994..c23301c4d 100644
--- a/agent/functions/function.go
+++ b/agent/functions/function.go
@@ -13,17 +13,20 @@ import (
)
type Function struct {
- key string
- UID string
- Timeout time.Duration
- Name string
- Args []string
- Payload []byte
+ key string
+ UID string
+ Timeout time.Duration
+ Name string
+ Args []string
+ Payload []byte
+ Permissions string
+ Source string
+ ContentType string
}
func (f *Function) String() string {
- return fmt.Sprintf("key: %s, uid: %s, timeout: %s, function: %s, args: %v, payload: %s",
- f.key, f.UID, f.Timeout, f.Name, f.Args, string(f.Payload))
+ return fmt.Sprintf("key: '%s', uid: '%s', timeout: '%s', function: '%s', args: '%v', permissions: '%s', source: '%s', contentType: '%s', payload: '%s'",
+ f.key, f.UID, f.Timeout, f.Name, f.Args, f.Permissions, f.Source, f.ContentType, string(f.Payload))
}
func parseFunction(s string) (*Function, error) {
@@ -34,8 +37,9 @@ func parseFunction(s string) (*Function, error) {
if err != nil {
return nil, err
}
- if len(parts) != 4 {
- return nil, fmt.Errorf("unexpected number of words: want 4, got %d (%v)", len(parts), parts)
+
+ if n := len(parts); n != 6 && n != 7 {
+ return nil, fmt.Errorf("unexpected number of words: want 6 or 7, got %d (%v)", n, parts)
}
timeout, err := strconv.ParseInt(parts[2], 10, 64)
@@ -43,14 +47,21 @@ func parseFunction(s string) (*Function, error) {
return nil, err
}
+ // 'FUNCTION_PAYLOAD 5d50db31d7e446768809b95382789257 120 \"config go.d:collector:example:jobs add example3\" \"method=api,role=god,ip=10.20.4.44\" \"text/yaml\"'
cmd := strings.Split(parts[3], " ")
fn := &Function{
- key: parts[0],
- UID: parts[1],
- Timeout: time.Duration(timeout) * time.Second,
- Name: cmd[0],
- Args: cmd[1:],
+ key: parts[0],
+ UID: parts[1],
+ Timeout: time.Duration(timeout) * time.Second,
+ Name: cmd[0],
+ Args: cmd[1:],
+ Permissions: parts[4],
+ Source: parts[5],
+ }
+
+ if len(parts) == 7 {
+ fn.ContentType = parts[6]
}
return fn, nil
diff --git a/agent/functions/manager.go b/agent/functions/manager.go
index 760780cff..189ec4c76 100644
--- a/agent/functions/manager.go
+++ b/agent/functions/manager.go
@@ -5,12 +5,15 @@ package functions
import (
"bufio"
"context"
+ "fmt"
"io"
"log/slog"
"os"
"strings"
"sync"
+ "github.com/netdata/go.d.plugin/agent/netdataapi"
+ "github.com/netdata/go.d.plugin/agent/safewriter"
"github.com/netdata/go.d.plugin/logger"
"github.com/mattn/go-isatty"
@@ -25,6 +28,7 @@ func NewManager() *Manager {
slog.String("component", "functions manager"),
),
Input: os.Stdin,
+ api: netdataapi.New(safewriter.Stdout),
mux: &sync.Mutex{},
FunctionRegistry: make(map[string]func(Function)),
}
@@ -34,18 +38,11 @@ type Manager struct {
*logger.Logger
Input io.Reader
+ api *netdataapi.API
mux *sync.Mutex
FunctionRegistry map[string]func(Function)
}
-func (m *Manager) Register(name string, fn func(Function)) {
- if fn == nil {
- m.Warningf("not registering '%s': nil function", name)
- return
- }
- m.addFunction(name, fn)
-}
-
func (m *Manager) Run(ctx context.Context) {
m.Info("instance is started")
defer func() { m.Info("instance is stopped") }()
@@ -102,19 +99,25 @@ func (m *Manager) run(r io.Reader) {
function, ok := m.lookupFunction(fn.Name)
if !ok {
m.Infof("skipping execution of '%s': unregistered function", fn.Name)
+ m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("unregistered function: %s", fn.Name), "501")
continue
}
if function == nil {
m.Warningf("skipping execution of '%s': nil function registered", fn.Name)
+ m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("nil function: %s", fn.Name), "501")
continue
}
- m.Debugf("executing function: '%s'", fn.String())
function(*fn)
}
}
-func (m *Manager) addFunction(name string, fn func(Function)) {
+func (m *Manager) Register(name string, fn func(Function)) {
+ if fn == nil {
+ m.Warningf("not registering '%s': nil function", name)
+ return
+ }
+
m.mux.Lock()
defer m.mux.Unlock()
@@ -126,6 +129,16 @@ func (m *Manager) addFunction(name string, fn func(Function)) {
m.FunctionRegistry[name] = fn
}
+func (m *Manager) Unregister(name string) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ if _, ok := m.FunctionRegistry[name]; !ok {
+ delete(m.FunctionRegistry, name)
+ m.Debugf("unregistering function '%s'", name)
+ }
+}
+
func (m *Manager) lookupFunction(name string) (func(Function), bool) {
m.mux.Lock()
defer m.mux.Unlock()
@@ -133,3 +146,10 @@ func (m *Manager) lookupFunction(name string) (func(Function), bool) {
f, ok := m.FunctionRegistry[name]
return f, ok
}
+
+func jsonErrorf(format string, a ...any) string {
+ msg := fmt.Sprintf(format, a...)
+ msg = strings.ReplaceAll(msg, "\n", " ")
+
+ return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg)
+}
diff --git a/agent/jobmgr/cache.go b/agent/jobmgr/cache.go
index 53a1f7325..422674562 100644
--- a/agent/jobmgr/cache.go
+++ b/agent/jobmgr/cache.go
@@ -4,22 +4,72 @@ package jobmgr
import (
"context"
+ "sync"
"github.com/netdata/go.d.plugin/agent/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
)
-func newRunningJobsCache() *runningJobsCache {
- return &runningJobsCache{}
+func newDiscoveredConfigsCache() *discoveredConfigs {
+ return &discoveredConfigs{
+ items: make(map[string]map[uint64]confgroup.Config),
+ }
+}
+
+func newSeenConfigCache() *seenConfigs {
+ return &seenConfigs{
+ items: make(map[string]*seenConfig),
+ }
+}
+
+func newExposedConfigCache() *exposedConfigs {
+ return &exposedConfigs{
+ items: make(map[string]*seenConfig),
+ }
}
-func newRetryingJobsCache() *retryingJobsCache {
- return &retryingJobsCache{}
+func newRunningJobsCache() *runningJobs {
+ return &runningJobs{
+ mux: sync.Mutex{},
+ items: make(map[string]*module.Job),
+ }
+}
+
+func newRetryingTasksCache() *retryingTasks {
+ return &retryingTasks{
+ items: make(map[string]*retryTask),
+ }
}
type (
- runningJobsCache map[string]bool
- retryingJobsCache map[uint64]retryTask
+ discoveredConfigs struct {
+ // [Source][Hash]
+ items map[string]map[uint64]confgroup.Config
+ }
+ seenConfigs struct {
+ // [cfg.UID()]
+ items map[string]*seenConfig
+ }
+ exposedConfigs struct {
+ // [cfg.FullName()]
+ items map[string]*seenConfig
+ }
+ seenConfig struct {
+ cfg confgroup.Config
+ status dyncfgStatus
+ }
+
+ runningJobs struct {
+ mux sync.Mutex
+ // [cfg.FullName()]
+ items map[string]*module.Job
+ }
+
+ retryingTasks struct {
+ // [cfg.UID()]
+ items map[string]*retryTask
+ }
retryTask struct {
cancel context.CancelFunc
timeout int
@@ -27,23 +77,112 @@ type (
}
)
-func (c runningJobsCache) put(cfg confgroup.Config) {
- c[cfg.FullName()] = true
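+// add merges a config group into the cache, returning configs that appeared and configs that disappeared for that source.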
+func (c *discoveredConfigs) add(group *confgroup.Group) (added, removed []confgroup.Config) {
+ cfgs, ok := c.items[group.Source]
+ if !ok {
+ cfgs = make(map[uint64]confgroup.Config)
+ c.items[group.Source] = cfgs
+ }
+
+ seen := make(map[uint64]bool)
+
+ for _, cfg := range group.Configs {
+ hash := cfg.Hash()
+ seen[hash] = true
+
+ if _, ok := cfgs[hash]; ok {
+ continue
+ }
+
+ cfgs[hash] = cfg
+ added = append(added, cfg)
+ }
+
+ for hash, cfg := range cfgs {
+ if !seen[hash] {
+ delete(cfgs, hash)
+ removed = append(removed, cfg)
+ }
+ }
+
+ if len(cfgs) == 0 {
+ delete(c.items, group.Source)
+ }
+
+ return added, removed
+}
+
+func (c *seenConfigs) add(sj *seenConfig) {
+ c.items[sj.cfg.UID()] = sj
+}
+func (c *seenConfigs) remove(cfg confgroup.Config) {
+ delete(c.items, cfg.UID())
+}
+func (c *seenConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) {
+ v, ok := c.items[cfg.UID()]
+ return v, ok
+}
+
+func (c *exposedConfigs) add(sj *seenConfig) {
+ c.items[sj.cfg.FullName()] = sj
+}
+func (c *exposedConfigs) remove(cfg confgroup.Config) {
+ delete(c.items, cfg.FullName())
+}
+func (c *exposedConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) {
+ v, ok := c.items[cfg.FullName()]
+ return v, ok
}
-func (c runningJobsCache) remove(cfg confgroup.Config) {
- delete(c, cfg.FullName())
+
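+// lookupByName finds an exposed config by its FullName key ("module_job", or just the job name when module == job).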
+func (c *exposedConfigs) lookupByName(module, job string) (*seenConfig, bool) {
+ key := module + "_" + job
+ if module == job {
+ key = job
+ }
+ v, ok := c.items[key]
+ return v, ok
+}
+
+func (c *runningJobs) lock() {
+ c.mux.Lock()
+}
+func (c *runningJobs) unlock() {
+ c.mux.Unlock()
+}
+func (c *runningJobs) add(fullName string, job *module.Job) {
+ c.items[fullName] = job
}
-func (c runningJobsCache) has(cfg confgroup.Config) bool {
- return c[cfg.FullName()]
+func (c *runningJobs) remove(fullName string) {
+ delete(c.items, fullName)
+}
+func (c *runningJobs) has(fullName string) bool {
+ _, ok := c.lookup(fullName)
+ return ok
+}
+func (c *runningJobs) lookup(fullName string) (*module.Job, bool) {
+ j, ok := c.items[fullName]
+ return j, ok
+}
+func (c *runningJobs) forEach(fn func(fullName string, job *module.Job)) {
+ for k, j := range c.items {
+ fn(k, j)
+ }
}
-func (c retryingJobsCache) put(cfg confgroup.Config, retry retryTask) {
- c[cfg.Hash()] = retry
+func (c *retryingTasks) add(cfg confgroup.Config, retry *retryTask) {
+ c.items[cfg.UID()] = retry
+}
+func (c *retryingTasks) remove(cfg confgroup.Config) {
+ if v, ok := c.lookup(cfg); ok {
+ v.cancel()
+ }
+ delete(c.items, cfg.UID())
}
-func (c retryingJobsCache) remove(cfg confgroup.Config) {
- delete(c, cfg.Hash())
+func (c *retryingTasks) has(cfg confgroup.Config) bool {
+ _, ok := c.items[cfg.UID()]
+ return ok
}
-func (c retryingJobsCache) lookup(cfg confgroup.Config) (retryTask, bool) {
- v, ok := c[cfg.Hash()]
+func (c *retryingTasks) lookup(cfg confgroup.Config) (*retryTask, bool) {
+ v, ok := c.items[cfg.UID()]
return v, ok
}
diff --git a/agent/jobmgr/di.go b/agent/jobmgr/di.go
index fa567b2ce..98a274877 100644
--- a/agent/jobmgr/di.go
+++ b/agent/jobmgr/di.go
@@ -4,6 +4,7 @@ package jobmgr
import (
"github.com/netdata/go.d.plugin/agent/confgroup"
+ "github.com/netdata/go.d.plugin/agent/functions"
"github.com/netdata/go.d.plugin/agent/vnodes"
)
@@ -12,21 +13,27 @@ type FileLocker interface {
Unlock(name string) error
}
-type Vnodes interface {
- Lookup(key string) (*vnodes.VirtualNode, bool)
-}
-
-type StatusSaver interface {
+type FileStatus interface {
Save(cfg confgroup.Config, state string)
Remove(cfg confgroup.Config)
}
-type StatusStore interface {
+type FileStatusStore interface {
Contains(cfg confgroup.Config, states ...string) bool
}
-type Dyncfg interface {
- Register(cfg confgroup.Config)
- Unregister(cfg confgroup.Config)
- UpdateStatus(cfg confgroup.Config, status, payload string)
+type Vnodes interface {
+ Lookup(key string) (*vnodes.VirtualNode, bool)
+}
+
+type FunctionRegistry interface {
+ Register(name string, reg func(functions.Function))
+ Unregister(name string)
+}
+
+type DyncfgAPI interface {
+ CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string)
+ CONFIGDELETE(id string)
+ CONFIGSTATUS(id, status string)
+ FUNCRESULT(uid, contentType, payload, code string)
}
diff --git a/agent/jobmgr/dyncfg.go b/agent/jobmgr/dyncfg.go
new file mode 100644
index 000000000..b501c2a29
--- /dev/null
+++ b/agent/jobmgr/dyncfg.go
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "encoding/json"
+ "fmt"
+ "log/slog"
+ "strconv"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent/confgroup"
+ "github.com/netdata/go.d.plugin/agent/functions"
+ "github.com/netdata/go.d.plugin/logger"
+
+ "gopkg.in/yaml.v2"
+)
+
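+// dyncfgStatus is the job state reported to the agent via dyncfg status updates.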
+type dyncfgStatus int
+
+const (
+ _ dyncfgStatus = iota
+ dyncfgAccepted
+ dyncfgRunning
+ dyncfgFailed
+ dyncfgIncomplete
+ dyncfgDisabled
+)
+
+func (s dyncfgStatus) String() string {
+ switch s {
+ case dyncfgAccepted:
+ return "accepted"
+ case dyncfgRunning:
+ return "running"
+ case dyncfgFailed:
+ return "failed"
+ case dyncfgIncomplete:
+ return "incomplete"
+ case dyncfgDisabled:
+ return "disabled"
+ default:
+ return "unknown"
+ }
+}
+
+const (
+ dyncfgIDPrefix = "go.d:collector:"
+ dyncfgPath = "/collectors/jobs"
+)
+
+func dyncfgModID(name string) string {
+ return fmt.Sprintf("%s%s:jobs", dyncfgIDPrefix, name)
+}
+func dyncfgJobID(cfg confgroup.Config) string {
+ return fmt.Sprintf("%s%s:jobs:%s", dyncfgIDPrefix, cfg.Module(), cfg.Name())
+}
+
+func dyncfgModCmds() string {
+ return "add schema enable disable test"
+}
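+// jobs added via dyncfg accept the full command set; jobs from other sources only expose schema, get and restart.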
+func dyncfgJobCmds(cfg confgroup.Config) string {
+ if cfg.SourceType() == "dyncfg" {
+ return "schema get enable disable update restart test remove"
+ }
+ return "schema get restart"
+}
+
+func (m *Manager) dyncfgModuleCreate(name string) {
+ id := dyncfgModID(name)
+ path := dyncfgPath
+ cmds := dyncfgModCmds()
+ typ := "template"
+ src := "internal"
+ m.api.CONFIGCREATE(id, dyncfgAccepted.String(), typ, path, src, src, cmds)
+}
+
+func (m *Manager) dyncfgJobCreate(cfg confgroup.Config, status dyncfgStatus) {
+ id := dyncfgJobID(cfg)
+ path := dyncfgPath
+ cmds := dyncfgJobCmds(cfg)
+ typ := "job"
+ m.api.CONFIGCREATE(id, status.String(), typ, path, cfg.SourceType(), cfg.Source(), cmds)
+}
+
+func (m *Manager) dyncfgJobRemove(cfg confgroup.Config) {
+ m.api.CONFIGDELETE(dyncfgJobID(cfg))
+}
+
+func (m *Manager) dyncfgJobStatus(cfg confgroup.Config, status dyncfgStatus) {
+ m.api.CONFIGSTATUS(dyncfgJobID(cfg), status.String())
+}
+
+func (m *Manager) dyncfgConfig(fn functions.Function) {
+ if len(fn.Args) < 2 {
+ m.Warningf("dyncfg: %s: missing required arguments, want 3 got %d", fn.Name, len(fn.Args))
+ m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 2, but got %d.", len(fn.Args))
+ return
+ }
+
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ select {
+ case <-m.ctx.Done():
+ m.dyncfgRespf(fn, 503, "Job manager is shutting down.")
+ return
+ default:
+ }
+
+ action := strings.ToLower(fn.Args[1])
+
+ m.Infof("QQ FN(%s): '%s'", action, fn)
+
+ switch action {
+ case "test":
+ m.dyncfgConfigTest(fn)
+ case "schema":
+ m.dyncfgConfigSchema(fn)
+ case "get":
+ m.dyncfgConfigGet(fn)
+ case "remove":
+ m.dyncfgConfigRemove(fn)
+ case "restart":
+ m.dyncfgConfigRestart(fn)
+ case "enable":
+ m.dyncfgConfigEnable(fn)
+ case "disable":
+ m.dyncfgConfigDisable(fn)
+ case "add":
+ m.dyncfgConfigAdd(fn)
+ case "update":
+ m.dyncfgConfigUpdate(fn)
+ default:
+ m.Warningf("dyncfg: function '%s' not implemented", fn.String())
+ m.dyncfgRespf(fn, 501, "Function '%s' is not implemented.", fn.Name)
+ }
+}
+
+func (m *Manager) dyncfgConfigTest(fn functions.Function) {
+ id := fn.Args[0]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: test: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400,
+ "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ creator, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: test: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: test: module %s: failed to create config from payload: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ return
+ }
+
+ cfg.SetModule(mn)
+ cfg.SetName("test")
+
+ job := creator.Create()
+
+ if err := applyConfig(cfg, job); err != nil {
+ m.Warningf("dyncfg: test: module %s: failed to apply config: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ return
+ }
+
+ job.GetBase().Logger = logger.New().With(
+ slog.String("collector", cfg.Module()),
+ slog.String("job", cfg.Name()),
+ )
+
+ defer job.Cleanup()
+
+ if err := job.Init(); err != nil {
+ m.dyncfgRespf(fn, 500, "Job initialization failed: %v", err)
+ return
+ }
+ if err := job.Check(); err != nil {
+ m.dyncfgRespf(fn, 503, "Job check failed: %v", err)
+ return
+ }
+
+ m.dyncfgRespf(fn, 200, "")
+}
+
+func (m *Manager) dyncfgConfigSchema(fn functions.Function) {
+ id := fn.Args[0]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: schema: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ mod, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: schema: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ if mod.JobConfigSchema == "" {
+ m.Warningf("dyncfg: schema: module %s: schema not found", mn)
+ m.dyncfgRespf(fn, 500, "Module %s configuration schema not found.", mn)
+ return
+ }
+
+ m.dyncfgRespPayload(fn, mod.JobConfigSchema)
+}
+
+func (m *Manager) dyncfgConfigGet(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: get: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400,
+ "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ creator, ok := m.Modules.Lookup(mn)
+ if !ok {
+ m.Warningf("dyncfg: get: module %s not found", mn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: get: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ mod := creator.Create()
+
+ if err := applyConfig(ecfg.cfg, mod); err != nil {
+ m.Warningf("dyncfg: get: module %s job %s failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ return
+ }
+
+ conf := mod.Configuration()
+ if conf == nil {
+ m.Warningf("dyncfg: get: module %s: configuration not found", mn)
+ m.dyncfgRespf(fn, 500, "Module %s does not provide configuration.", mn)
+ return
+ }
+
+ bs, err := json.Marshal(conf)
+ if err != nil {
+ m.Warningf("dyncfg: get: module %s job %s failed marshal config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 500, "Failed to convert configuration into JSON: %v.", err)
+ return
+ }
+
+ m.dyncfgRespPayload(fn, string(bs))
+}
+
+func (m *Manager) dyncfgConfigAdd(fn functions.Function) {
+ if len(fn.Args) < 3 {
+ m.Warningf("dyncfg: add: missing required arguments, want 3 got %d", len(fn.Args))
+ m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 3, but got %d.", len(fn.Args))
+ return
+ }
+
+ id := fn.Args[0]
+ jn := fn.Args[2]
+ mn, ok := extractModuleName(id)
+ if !ok {
+ m.Warningf("dyncfg: add: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ if len(fn.Payload) == 0 {
+ m.Warningf("dyncfg: add: module %s job %s missing configuration payload.", mn, jn)
+ m.dyncfgRespf(fn, 400, "Missing configuration payload.")
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: add: module %s job %s: failed to create config from payload: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ return
+ }
+
+ m.dyncfgSetConfigMeta(cfg, mn, jn)
+
+ scfg := &seenConfig{cfg: cfg}
+ m.seenConfigs.add(scfg)
+
+ ecfg, ok := m.exposedConfigs.lookup(cfg)
+ if ok {
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+ }
+ ecfg = scfg
+ m.exposedConfigs.add(ecfg)
+
+ if _, err := m.createCollectorJob(ecfg.cfg); err != nil {
+ // TODO: remove from exposed
+ ecfg.status = dyncfgFailed
+ m.Warningf("dyncfg: add: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ ecfg.status = dyncfgAccepted
+ m.dyncfgRespf(fn, 202, "")
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigRemove(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: remove: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: remove: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ if ecfg.cfg.SourceType() != "dyncfg" {
+ m.Warningf("dyncfg: remove: module %s job %s: can not remove jobs of type %s", mn, jn, ecfg.cfg.SourceType())
+ m.dyncfgRespf(fn, 405, "Removing jobs of type '%s' is not supported. Only 'dyncfg' jobs can be removed.", ecfg.cfg.SourceType())
+ return
+ }
+
+ m.seenConfigs.remove(ecfg.cfg)
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobRemove(ecfg.cfg)
+}
+
+func (m *Manager) dyncfgConfigRestart(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: restart: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: restart: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ job, err := m.createCollectorJob(ecfg.cfg)
+ if err != nil {
+ m.Warningf("dyncfg: restart: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ switch ecfg.status {
+ case dyncfgAccepted, dyncfgDisabled:
+ m.Warningf("dyncfg: restart: module %s job %s: restarting not allowed in %s", mn, jn, ecfg.status)
+ m.dyncfgRespf(fn, 405, "Restarting data collection job is not allowed in '%s' state.", ecfg.status)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ case dyncfgRunning:
+ m.stopRunningJob(ecfg.cfg.FullName())
+ default:
+ }
+
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 503, "Job restart failed: %v", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ m.startRunningJob(job)
+ ecfg.status = dyncfgRunning
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigEnable(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: enable: could not extract module and job from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: enable: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ switch ecfg.status {
+ case dyncfgAccepted, dyncfgDisabled:
+ default:
+ // TODO: not allowed
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ job, err := m.createCollectorJob(ecfg.cfg)
+ if err != nil {
+ ecfg.status = dyncfgFailed
+ m.Warningf("dyncfg: enable: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ // TODO: retry
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ if ecfg.cfg.SourceType() == "stock" {
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.dyncfgJobRemove(ecfg.cfg)
+ } else {
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 200, "Job enable failed: %v", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ }
+ return
+ }
+
+ ecfg.status = dyncfgRunning
+ m.startRunningJob(job)
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigDisable(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: disable: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: disable: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ switch ecfg.status {
+ case dyncfgDisabled:
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ case dyncfgRunning:
+ m.stopRunningJob(ecfg.cfg.FullName())
+ default:
+ }
+
+ ecfg.status = dyncfgDisabled
+
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
+func (m *Manager) dyncfgConfigUpdate(fn functions.Function) {
+ id := fn.Args[0]
+ mn, jn, ok := extractModuleJobName(id)
+ if !ok {
+ m.Warningf("dyncfg: update: could not extract module from id (%s)", id)
+ m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id)
+ return
+ }
+
+ ecfg, ok := m.exposedConfigs.lookupByName(mn, jn)
+ if !ok {
+ m.Warningf("dyncfg: update: module %s job %s not found", mn, jn)
+ m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn)
+ return
+ }
+
+ cfg, err := configFromPayload(fn)
+ if err != nil {
+ m.Warningf("dyncfg: update: module %s: failed to create config from payload: %v", mn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ m.dyncfgSetConfigMeta(cfg, mn, jn)
+
+ if ecfg.status == dyncfgRunning && ecfg.cfg.UID() == cfg.UID() {
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ job, err := m.createCollectorJob(cfg)
+ if err != nil {
+ m.Warningf("dyncfg: update: module %s job %s: failed to apply config: %v", mn, jn, err)
+ m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ if ecfg.status == dyncfgAccepted {
+ m.Warningf("dyncfg: update: module %s job %s: updating not allowed in %s", mn, jn, ecfg.status)
+ m.dyncfgRespf(fn, 403, "Updating data collection job is not allowed in current state: '%s'.", ecfg.status)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ if ecfg.cfg.SourceType() == "dyncfg" {
+ m.seenConfigs.remove(ecfg.cfg)
+ }
+ m.exposedConfigs.remove(ecfg.cfg)
+ m.stopRunningJob(ecfg.cfg.FullName())
+
+ scfg := &seenConfig{cfg: cfg}
+ m.seenConfigs.add(scfg)
+ m.exposedConfigs.add(scfg)
+
+ if ecfg.status == dyncfgDisabled {
+ scfg.status = dyncfgDisabled
+ job.Cleanup()
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(cfg, scfg.status)
+ return
+ }
+
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ m.dyncfgRespf(fn, 200, "Job update failed: %v", err)
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+ return
+ }
+
+ ecfg.status = dyncfgRunning
+ m.startRunningJob(job)
+ m.dyncfgRespf(fn, 200, "")
+ m.dyncfgJobStatus(ecfg.cfg, ecfg.status)
+}
+
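+// dyncfgSetConfigMeta stamps a payload-derived config with dyncfg provenance and applies the module's defaults.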
+func (m *Manager) dyncfgSetConfigMeta(cfg confgroup.Config, module, name string) {
+ cfg.SetProvider("dyncfg")
+ cfg.SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, name))
+ cfg.SetSourceType("dyncfg")
+ cfg.SetModule(module)
+ cfg.SetName(name)
+ if def, ok := m.ConfigDefaults.Lookup(module); ok {
+ cfg.ApplyDefaults(def)
+ }
+}
+
+func (m *Manager) dyncfgRespPayload(fn functions.Function, payload string) {
+ m.api.FUNCRESULT(fn.UID, "application/json", payload, "200")
+}
+
+func (m *Manager) dyncfgRespf(fn functions.Function, code int, msgf string, a ...any) {
+ if fn.UID == "" {
+ return
+ }
+ bs, _ := json.Marshal(struct {
+ Status int `json:"status"`
+ Message string `json:"message"`
+ }{
+ Status: code,
+ Message: fmt.Sprintf(msgf, a...),
+ })
+ m.api.FUNCRESULT(fn.UID, "application/json", string(bs), strconv.Itoa(code))
+}
+
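+// configFromPayload decodes the function payload as JSON or YAML, depending on the declared content type.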
+func configFromPayload(fn functions.Function) (confgroup.Config, error) {
+ var cfg confgroup.Config
+
+ if fn.ContentType != "application/json" {
+ if err := yaml.Unmarshal(fn.Payload, &cfg); err != nil {
+ return nil, err
+ }
+
+ return cfg, nil
+ }
+
+ if err := json.Unmarshal(fn.Payload, &cfg); err != nil {
+ return nil, err
+ }
+
+ return cfg.Clone()
+}
+
+func extractModuleJobName(id string) (mn string, jn string, ok bool) {
+ if mn, ok = extractModuleName(id); !ok {
+ return "", "", false
+ }
+ if jn, ok = extractJobName(id); !ok {
+ return "", "", false
+ }
+ return mn, jn, true
+}
+
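+// extractModuleName returns the module part of an ID of the form "go.d:collector:<module>[:jobs[:<job>]]".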
+func extractModuleName(id string) (string, bool) {
+ id = strings.TrimPrefix(id, dyncfgIDPrefix)
+ i := strings.IndexByte(id, ':')
+ if i == -1 {
+ return id, id != ""
+ }
+ return id[:i], true
+}
+
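+// extractJobName returns the substring after the last ':' in the ID.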
+func extractJobName(id string) (string, bool) {
+ i := strings.LastIndexByte(id, ':')
+ if i == -1 {
+ return "", false
+ }
+ return id[i+1:], true
+}
diff --git a/agent/jobmgr/manager.go b/agent/jobmgr/manager.go
index 7088f84f9..49ef5f3c6 100644
--- a/agent/jobmgr/manager.go
+++ b/agent/jobmgr/manager.go
@@ -7,63 +7,42 @@ import (
"fmt"
"io"
"log/slog"
- "os"
"strings"
"sync"
"time"
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/agent/netdataapi"
+ "github.com/netdata/go.d.plugin/agent/safewriter"
+ "github.com/netdata/go.d.plugin/agent/ticker"
"github.com/netdata/go.d.plugin/logger"
"gopkg.in/yaml.v2"
)
-type Job interface {
- Name() string
- ModuleName() string
- FullName() string
- AutoDetection() bool
- AutoDetectionEvery() int
- RetryAutoDetection() bool
- Tick(clock int)
- Start()
- Stop()
- Cleanup()
-}
-
-type jobStatus = string
-
-const (
- jobStatusRunning jobStatus = "running" // Check() succeeded
- jobStatusRetrying jobStatus = "retrying" // Check() failed, but we need keep trying auto-detection
- jobStatusStoppedFailed jobStatus = "stopped_failed" // Check() failed
- jobStatusStoppedDupLocal jobStatus = "stopped_duplicate_local" // a job with the same FullName is running
- jobStatusStoppedDupGlobal jobStatus = "stopped_duplicate_global" // a job with the same FullName is registered by another plugin
- jobStatusStoppedRegErr jobStatus = "stopped_registration_error" // an error during registration (only 'too many open files')
- jobStatusStoppedCreateErr jobStatus = "stopped_creation_error" // an error during creation (yaml unmarshal)
-)
-
-func NewManager() *Manager {
- np := noop{}
+func New() *Manager {
mgr := &Manager{
Logger: logger.New().With(
slog.String("component", "job manager"),
),
- Out: io.Discard,
- FileLock: np,
- StatusSaver: np,
- StatusStore: np,
- Vnodes: np,
- Dyncfg: np,
-
- confGroupCache: confgroup.NewCache(),
-
- runningJobs: newRunningJobsCache(),
- retryingJobs: newRetryingJobsCache(),
-
- addCh: make(chan confgroup.Config),
- removeCh: make(chan confgroup.Config),
+ Out: io.Discard,
+ FileLock: noop{},
+ FileStatus: noop{},
+ FileStatusStore: noop{},
+ Vnodes: noop{},
+ FnReg: noop{},
+
+ discoveredConfigs: newDiscoveredConfigsCache(),
+ seenConfigs: newSeenConfigCache(),
+ exposedConfigs: newExposedConfigCache(),
+ runningJobs: newRunningJobsCache(),
+ retryingTasks: newRetryingTasksCache(),
+
+ retryCh: make(chan confgroup.Config),
+ api: netdataapi.New(safewriter.Stdout),
+ mux: sync.Mutex{},
+ started: make(chan struct{}),
}
return mgr
@@ -72,210 +51,274 @@ func NewManager() *Manager {
type Manager struct {
*logger.Logger
- PluginName string
- Out io.Writer
- Modules module.Registry
-
- FileLock FileLocker
- StatusSaver StatusSaver
- StatusStore StatusStore
- Vnodes Vnodes
- Dyncfg Dyncfg
-
- confGroupCache *confgroup.Cache
- runningJobs *runningJobsCache
- retryingJobs *retryingJobsCache
-
- addCh chan confgroup.Config
- removeCh chan confgroup.Config
-
- queueMux sync.Mutex
- queue []Job
+ PluginName string
+ Out io.Writer
+ Modules module.Registry
+ ConfigDefaults confgroup.Registry
+
+ FileLock FileLocker
+ FileStatus FileStatus
+ FileStatusStore FileStatusStore
+ Vnodes Vnodes
+ FnReg FunctionRegistry
+
+ discoveredConfigs *discoveredConfigs
+ seenConfigs *seenConfigs
+ exposedConfigs *exposedConfigs
+ retryingTasks *retryingTasks
+ runningJobs *runningJobs
+
+ api DyncfgAPI
+ ctx context.Context
+ retryCh chan confgroup.Config
+ mux sync.Mutex
+
+ started chan struct{}
}
func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) {
m.Info("instance is started")
defer func() { m.cleanup(); m.Info("instance is stopped") }()
+ m.ctx = ctx
+
+ m.FnReg.Register("config", m.dyncfgConfig)
+
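+ // expose every registered module as a dyncfg template so jobs can be added at runtime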
+ for name := range m.Modules {
+ m.dyncfgModuleCreate(name)
+ }
var wg sync.WaitGroup
wg.Add(1)
- go func() { defer wg.Done(); m.runConfigGroupsHandling(ctx, in) }()
+ go func() { defer wg.Done(); m.runProcessDiscoveredConfigs(in) }()
wg.Add(1)
- go func() { defer wg.Done(); m.runConfigsHandling(ctx) }()
+ go func() { defer wg.Done(); m.runNotifyRunningJobs() }()
- wg.Add(1)
- go func() { defer wg.Done(); m.runRunningJobsHandling(ctx) }()
+ close(m.started)
wg.Wait()
- <-ctx.Done()
+ <-m.ctx.Done()
}
-func (m *Manager) runConfigGroupsHandling(ctx context.Context, in chan []*confgroup.Group) {
+func (m *Manager) runProcessDiscoveredConfigs(in chan []*confgroup.Group) {
for {
select {
- case <-ctx.Done():
+ case <-m.ctx.Done():
return
case groups := <-in:
- for _, gr := range groups {
- select {
- case <-ctx.Done():
- return
- default:
- a, r := m.confGroupCache.Add(gr)
- m.Debugf("received config group ('%s'): %d jobs (added: %d, removed: %d)", gr.Source, len(gr.Configs), len(a), len(r))
- sendConfigs(ctx, m.removeCh, r)
- sendConfigs(ctx, m.addCh, a)
- }
- }
+ m.processDiscoveredConfigGroups(groups)
+ case cfg := <-m.retryCh:
+ m.addDiscoveredConfig(cfg)
}
}
}
-func (m *Manager) runConfigsHandling(ctx context.Context) {
- for {
- select {
- case <-ctx.Done():
- return
- case cfg := <-m.addCh:
- m.addConfig(ctx, cfg)
- case cfg := <-m.removeCh:
- m.removeConfig(cfg)
+func (m *Manager) processDiscoveredConfigGroups(groups []*confgroup.Group) {
+ for _, gr := range groups {
+ a, r := m.discoveredConfigs.add(gr)
+ m.Debugf("received configs: %d/+%d/-%d (group '%s')", len(gr.Configs), len(a), len(r), gr.Source)
+ for _, cfg := range r {
+ m.removeDiscoveredConfig(cfg)
+ }
+ for _, cfg := range a {
+ m.addDiscoveredConfig(cfg)
}
}
}
-func (m *Manager) cleanup() {
- for _, task := range *m.retryingJobs {
- task.cancel()
+func (m *Manager) addDiscoveredConfig(cfg confgroup.Config) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ task, isRetry := m.retryingTasks.lookup(cfg)
+ if isRetry {
+ m.retryingTasks.remove(cfg)
}
- for name := range *m.runningJobs {
- _ = m.FileLock.Unlock(name)
+
+ scfg, ok := m.seenConfigs.lookup(cfg)
+ if !ok {
+ scfg = &seenConfig{cfg: cfg}
+ m.seenConfigs.add(scfg)
}
- // TODO: m.Dyncfg.Register() ?
- m.stopRunningJobs()
-}
-func (m *Manager) addConfig(ctx context.Context, cfg confgroup.Config) {
- task, isRetry := m.retryingJobs.lookup(cfg)
- if isRetry {
- task.cancel()
- m.retryingJobs.remove(cfg)
- } else {
- m.Dyncfg.Register(cfg)
+ ecfg, ok := m.exposedConfigs.lookup(cfg)
+ if !ok {
+ ecfg = scfg
+ m.exposedConfigs.add(ecfg)
}
- if m.runningJobs.has(cfg) {
- m.Infof("%s[%s] job is being served by another job, skipping it", cfg.Module(), cfg.Name())
- m.StatusSaver.Save(cfg, jobStatusStoppedDupLocal)
- m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another job")
- return
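+ // a config with the same FullName is already exposed: keep the higher-priority source, and never preempt a running job of equal priority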
+ if ok {
+ sp, ep := scfg.cfg.SourceTypePriority(), ecfg.cfg.SourceTypePriority()
+ if ep > sp || (ep == sp && ecfg.status == dyncfgRunning) {
+ return
+ }
+ m.stopRunningJob(ecfg.cfg.FullName())
+ m.exposedConfigs.add(scfg) // replace
+ ecfg = scfg
}
- job, err := m.createJob(cfg)
+ job, err := m.createCollectorJob(ecfg.cfg)
if err != nil {
- m.Warningf("couldn't create %s[%s]: %v", cfg.Module(), cfg.Name(), err)
- m.StatusSaver.Save(cfg, jobStatusStoppedCreateErr)
- m.Dyncfg.UpdateStatus(cfg, "error", fmt.Sprintf("build error: %s", err))
+ ecfg.status = dyncfgFailed
+ if !isStock(ecfg.cfg) {
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
+ }
return
}
- cleanupJob := true
- defer func() {
- if cleanupJob {
- job.Cleanup()
- }
- }()
-
if isRetry {
job.AutoDetectEvery = task.timeout
job.AutoDetectTries = task.retries
} else if job.AutoDetectionEvery() == 0 {
- switch {
- case m.StatusStore.Contains(cfg, jobStatusRunning, jobStatusRetrying):
- m.Infof("%s[%s] job last status is running/retrying, applying recovering settings", cfg.Module(), cfg.Name())
- job.AutoDetectEvery = 30
- job.AutoDetectTries = 11
- case isInsideK8sCluster() && cfg.Provider() == "file watcher":
- m.Infof("%s[%s] is k8s job, applying recovering settings", cfg.Module(), cfg.Name())
- job.AutoDetectEvery = 10
- job.AutoDetectTries = 7
+ // restore the pre-refactor recovering settings for jobs whose last known state was running/retrying
+ if m.FileStatusStore.Contains(ecfg.cfg, "running", "retrying") {
+ job.AutoDetectEvery = 30
+ job.AutoDetectTries = 11
+ }
}
- switch detection(job) {
- case jobStatusRunning:
- if ok, err := m.FileLock.Lock(cfg.FullName()); ok || err != nil && !isTooManyOpenFiles(err) {
- cleanupJob = false
- m.runningJobs.put(cfg)
- m.StatusSaver.Save(cfg, jobStatusRunning)
- m.Dyncfg.UpdateStatus(cfg, "running", "")
- m.startJob(job)
- } else if isTooManyOpenFiles(err) {
- m.Error(err)
- m.StatusSaver.Save(cfg, jobStatusStoppedRegErr)
- m.Dyncfg.UpdateStatus(cfg, "error", "too many open files")
- } else {
- m.Infof("%s[%s] job is being served by another plugin, skipping it", cfg.Module(), cfg.Name())
- m.StatusSaver.Save(cfg, jobStatusStoppedDupGlobal)
- m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another plugin")
+ if err := job.AutoDetection(); err != nil {
+ job.Cleanup()
+ ecfg.status = dyncfgFailed
+ if !isStock(ecfg.cfg) {
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
}
- case jobStatusRetrying:
- m.Infof("%s[%s] job detection failed, will retry in %d seconds", cfg.Module(), cfg.Name(), job.AutoDetectionEvery())
- ctx, cancel := context.WithCancel(ctx)
- m.retryingJobs.put(cfg, retryTask{
- cancel: cancel,
- timeout: job.AutoDetectionEvery(),
- retries: job.AutoDetectTries,
- })
- go runRetryTask(ctx, m.addCh, cfg, time.Second*time.Duration(job.AutoDetectionEvery()))
- m.StatusSaver.Save(cfg, jobStatusRetrying)
- m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, will retry later")
- case jobStatusStoppedFailed:
- m.StatusSaver.Save(cfg, jobStatusStoppedFailed)
- m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, stopping it")
- default:
- m.Warningf("%s[%s] job detection: unknown state", cfg.Module(), cfg.Name())
+ if job.RetryAutoDetection() {
+ ctx, cancel := context.WithCancel(m.ctx)
+ r := &retryTask{cancel: cancel, timeout: job.AutoDetectionEvery(), retries: job.AutoDetectTries}
+ m.retryingTasks.add(cfg, r)
+ go runRetryTask(ctx, m.retryCh, ecfg.cfg)
+ }
+ return
}
+
+ ecfg.status = dyncfgRunning
+ m.startRunningJob(job)
+ m.dyncfgJobCreate(ecfg.cfg, ecfg.status)
}
-func (m *Manager) removeConfig(cfg confgroup.Config) {
- if m.runningJobs.has(cfg) {
- m.stopJob(cfg.FullName())
- _ = m.FileLock.Unlock(cfg.FullName())
- m.runningJobs.remove(cfg)
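+// runRetryTask re-queues the config for another detection attempt after its auto-detection retry interval.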
+func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) {
+ t := time.NewTimer(time.Second * time.Duration(cfg.AutoDetectionRetry()))
+ defer t.Stop()
+
+ select {
+ case <-ctx.Done():
+ case <-t.C:
+ select {
+ case <-ctx.Done():
+ case out <- cfg:
+ }
}
+}
+
+func (m *Manager) removeDiscoveredConfig(cfg confgroup.Config) {
+ m.mux.Lock()
+ defer m.mux.Unlock()
- if task, ok := m.retryingJobs.lookup(cfg); ok {
- task.cancel()
- m.retryingJobs.remove(cfg)
+ m.retryingTasks.remove(cfg)
+
+ scfg, ok := m.seenConfigs.lookup(cfg)
+ if !ok {
+ return
}
+ m.seenConfigs.remove(cfg)
- m.StatusSaver.Remove(cfg)
- m.Dyncfg.Unregister(cfg)
+ ecfg, ok := m.exposedConfigs.lookup(cfg)
+ if !ok {
+ return
+ }
+ if scfg.cfg.UID() == ecfg.cfg.UID() {
+ m.exposedConfigs.remove(cfg)
+ m.stopRunningJob(cfg.FullName())
+ m.dyncfgJobRemove(cfg)
+ }
}
-func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) {
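+// runNotifyRunningJobs drives data collection by ticking every running job once per second.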
+func (m *Manager) runNotifyRunningJobs() {
+ tk := ticker.New(time.Second)
+ defer tk.Stop()
+
+ for {
+ select {
+ case <-m.ctx.Done():
+ return
+ case clock := <-tk.C:
+ m.runningJobs.lock()
+ m.runningJobs.forEach(func(_ string, job *module.Job) {
+ job.Tick(clock)
+ })
+ m.runningJobs.unlock()
+ }
+ }
+}
+
+func (m *Manager) cleanup() {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ m.FnReg.Unregister("config")
+
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ m.runningJobs.forEach(func(key string, job *module.Job) {
+ job.Stop()
+ m.runningJobs.remove(key)
+ })
+}
+
+func (m *Manager) startRunningJob(job *module.Job) {
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ if job, ok := m.runningJobs.lookup(job.FullName()); ok {
+ job.Stop()
+ }
+
+ go job.Start()
+ m.runningJobs.add(job.FullName(), job)
+}
+
+func (m *Manager) stopRunningJob(name string) {
+ m.runningJobs.lock()
+ defer m.runningJobs.unlock()
+
+ if job, ok := m.runningJobs.lookup(name); ok {
+ job.Stop()
+ m.runningJobs.remove(name)
+ }
+}
+
+func (m *Manager) createCollectorJob(cfg confgroup.Config) (*module.Job, error) {
creator, ok := m.Modules[cfg.Module()]
if !ok {
return nil, fmt.Errorf("can not find %s module", cfg.Module())
}
+ var vnode struct {
+ guid string
+ hostname string
+ labels map[string]string
+ }
+
+ if cfg.Vnode() != "" {
+ n, ok := m.Vnodes.Lookup(cfg.Vnode())
+ if !ok {
+ return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode())
+ }
+
+ vnode.guid = n.GUID
+ vnode.hostname = n.Hostname
+ vnode.labels = n.Labels
+ }
+
m.Debugf("creating %s[%s] job, config: %v", cfg.Module(), cfg.Name(), cfg)
mod := creator.Create()
- if err := unmarshal(cfg, mod); err != nil {
- return nil, err
- }
- labels := make(map[string]string)
- for name, value := range cfg.Labels() {
- n, ok1 := name.(string)
- v, ok2 := value.(string)
- if ok1 && ok2 {
- labels[n] = v
- }
+ if err := applyConfig(cfg, mod); err != nil {
+ return nil, err
}
jobCfg := module.JobConfig{
@@ -286,21 +329,13 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) {
UpdateEvery: cfg.UpdateEvery(),
AutoDetectEvery: cfg.AutoDetectionRetry(),
Priority: cfg.Priority(),
- Labels: labels,
- IsStock: isStockConfig(cfg),
+ Labels: makeLabels(cfg),
+ IsStock: cfg.SourceType() == "stock",
Module: mod,
Out: m.Out,
- }
-
- if cfg.Vnode() != "" {
- n, ok := m.Vnodes.Lookup(cfg.Vnode())
- if !ok {
- return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode())
- }
-
- jobCfg.VnodeGUID = n.GUID
- jobCfg.VnodeHostname = n.Hostname
- jobCfg.VnodeLabels = n.Labels
+ VnodeGUID: vnode.guid,
+ VnodeHostname: vnode.hostname,
+ VnodeLabels: vnode.labels,
}
job := module.NewJob(jobCfg)
@@ -308,62 +343,30 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) {
return job, nil
}
-func detection(job Job) jobStatus {
- if !job.AutoDetection() {
- if job.RetryAutoDetection() {
- return jobStatusRetrying
- } else {
- return jobStatusStoppedFailed
- }
- }
- return jobStatusRunning
-}
-
-func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config, timeout time.Duration) {
- t := time.NewTimer(timeout)
- defer t.Stop()
-
- select {
- case <-ctx.Done():
- case <-t.C:
- sendConfig(ctx, out, cfg)
- }
-}
-
-func sendConfigs(ctx context.Context, out chan<- confgroup.Config, cfgs []confgroup.Config) {
- for _, cfg := range cfgs {
- sendConfig(ctx, out, cfg)
- }
+func isStock(cfg confgroup.Config) bool {
+ return cfg.SourceType() == "stock"
}
-func sendConfig(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) {
- select {
- case <-ctx.Done():
- return
- case out <- cfg:
- }
-}
-
-func unmarshal(conf interface{}, module interface{}) error {
- bs, err := yaml.Marshal(conf)
+func applyConfig(cfg confgroup.Config, module any) error {
+ bs, err := yaml.Marshal(cfg)
if err != nil {
return err
}
return yaml.Unmarshal(bs, module)
}
-func isInsideK8sCluster() bool {
- host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT")
- return host != "" && port != ""
-}
-
func isTooManyOpenFiles(err error) bool {
return err != nil && strings.Contains(err.Error(), "too many open files")
}
-func isStockConfig(cfg confgroup.Config) bool {
- if !strings.HasPrefix(cfg.Provider(), "file") {
- return false
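+// makeLabels keeps only the label pairs whose keys and values are both strings.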
+func makeLabels(cfg confgroup.Config) map[string]string {
+ labels := make(map[string]string)
+ for name, value := range cfg.Labels() {
+ n, ok1 := name.(string)
+ v, ok2 := value.(string)
+ if ok1 && ok2 {
+ labels[n] = v
+ }
}
- return !strings.Contains(cfg.Source(), "/etc/netdata")
+ return labels
}
diff --git a/agent/jobmgr/manager_test.go b/agent/jobmgr/manager_test.go
index 69dceda49..4c9414908 100644
--- a/agent/jobmgr/manager_test.go
+++ b/agent/jobmgr/manager_test.go
@@ -3,102 +3,1251 @@
package jobmgr
import (
- "bytes"
- "context"
- "sync"
+ "encoding/json"
+ "fmt"
"testing"
- "time"
"github.com/netdata/go.d.plugin/agent/confgroup"
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/netdata/go.d.plugin/agent/safewriter"
- "github.com/stretchr/testify/assert"
+ "github.com/netdata/go.d.plugin/agent/functions"
)
-// TODO: tech dept
-func TestNewManager(t *testing.T) {
+func TestManager_Run_Dyncfg_Get(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[get] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-get",
+ Args: []string{dyncfgJobID(cfg), "get"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-get 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[get] existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1").
+ Set("option_int", 1)
+ bs, _ := json.Marshal(cfg)
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: bs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-get",
+ Args: []string{dyncfgJobID(cfg), "get"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 202 application/json 202
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-get 200 application/json 200
+{"option_str":"1","option_int":1}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Add(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[add] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 202 application/json 202
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+`,
+ }
+ },
+ },
+ "[add] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 202 application/json 202
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status accepted
+`,
+ }
+ },
+ },
+ "[add] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 202 application/json 202
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-add 202 application/json 202
+{"status":202,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Enable(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[enable] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-enable 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status failed
+`,
+ }
+ },
+ },
+ "[enable] dyncfg:nok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgFailed},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status failed
+
+FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status failed
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Disable(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[disable] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-disable 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:ok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:nok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status disabled
+`,
+ }
+ },
+ },
+ "[disable] dyncfg:nok twice": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("fail", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:fail:jobs:test status disabled
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Restart(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[restart] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-restart 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[restart] not enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-restart 403 application/json 403
+{"status":403,"message":"Restarting data collection job is not allowed in 'accepted' state."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+`,
+ }
+ },
+ },
+ "[restart] enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+`,
+ }
+ },
+ },
+ "[restart] disabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(cfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-restart 403 application/json 403
+{"status":403,"message":"Restarting data collection job is not allowed in 'disabled' state."}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+`,
+ }
+ },
+ },
+ "[restart] enabled dyncfg:ok multiple times": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "4-restart",
+ Args: []string{dyncfgJobID(cfg), "restart"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: cfg, status: dyncfgRunning},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 4-restart 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
+}
+
+func TestManager_Run_Dyncfg_Remove(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[remove] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-remove 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[remove] non-dyncfg": {
+ createSim: func() *runSim {
+ stockCfg := prepareStockCfg("success", "stock")
+ userCfg := prepareUserCfg("success", "user")
+ discCfg := prepareDiscoveredCfg("success", "discovered")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.processDiscoveredConfigGroups([]*confgroup.Group{
+ prepareCfgGroup(stockCfg.Source(), "stock", stockCfg),
+ prepareCfgGroup(userCfg.Source(), "user", userCfg),
+ prepareCfgGroup(discCfg.Source(), "discovered", discCfg),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-remove",
+ Args: []string{dyncfgJobID(stockCfg), "remove"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-remove",
+ Args: []string{dyncfgJobID(userCfg), "remove"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-remove",
+ Args: []string{dyncfgJobID(discCfg), "remove"},
+ })
+ },
+ wantDiscovered: []confgroup.Config{
+ stockCfg,
+ userCfg,
+ discCfg,
+ },
+ wantSeen: []seenConfig{
+ {cfg: stockCfg, status: dyncfgAccepted},
+ {cfg: userCfg, status: dyncfgAccepted},
+ {cfg: discCfg, status: dyncfgAccepted},
+ },
+ wantExposed: []seenConfig{
+ {cfg: stockCfg, status: dyncfgAccepted},
+ {cfg: userCfg, status: dyncfgAccepted},
+ {cfg: discCfg, status: dyncfgAccepted},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+CONFIG go.d:collector:success:jobs:stock create accepted job /collectors/success stock 'type=stock,module=success,job=stock' 'schema get enable disable update restart' 0x0000 0x0000
+
+CONFIG go.d:collector:success:jobs:user create accepted job /collectors/success user 'type=user,module=success,job=user' 'schema get enable disable update restart' 0x0000 0x0000
+
+CONFIG go.d:collector:success:jobs:discovered create accepted job /collectors/success discovered 'type=discovered,module=success,job=discovered' 'schema get enable disable update restart' 0x0000 0x0000
+
+FUNCTION_RESULT_BEGIN 1-remove 405 application/json 405
+{"status":405,"message":"Removing jobs of type 'stock' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+
+FUNCTION_RESULT_BEGIN 2-remove 405 application/json 405
+{"status":405,"message":"Removing jobs of type 'user' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+
+FUNCTION_RESULT_BEGIN 3-remove 405 application/json 405
+{"status":405,"message":"Removing jobs of type 'discovered' is not supported. Only 'dyncfg' jobs can be removed."}
+FUNCTION_RESULT_END
+`,
+ }
+ },
+ },
+ "[remove] not enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-remove 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test delete
+`,
+ }
+ },
+ },
+ "[remove] enabled dyncfg:ok": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
+ Payload: []byte("{}"),
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(cfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-remove",
+ Args: []string{dyncfgJobID(cfg), "remove"},
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 3-remove 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test delete
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
+ }
}
-// TODO: tech dept
-func TestManager_Run(t *testing.T) {
- groups := []*confgroup.Group{
- {
- Source: "source",
- Configs: []confgroup.Config{
- {
- "name": "name",
- "module": "success",
- "update_every": module.UpdateEvery,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- },
- {
- "name": "name",
- "module": "success",
- "update_every": module.UpdateEvery + 1,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- },
- {
- "name": "name",
- "module": "fail",
- "update_every": module.UpdateEvery + 1,
- "autodetection_retry": module.AutoDetectionRetry,
- "priority": module.Priority,
- },
+func TestManager_Run_Dyncfg_Update(t *testing.T) {
+ tests := map[string]struct {
+ createSim func() *runSim
+ }{
+ "[update] non-existing": {
+ createSim: func() *runSim {
+ cfg := prepareDyncfgCfg("success", "test")
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-update",
+ Args: []string{dyncfgJobID(cfg), "update"},
+ Payload: []byte("{}"),
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: nil,
+ wantExposed: nil,
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-update 404 application/json 404
+{"status":404,"message":"The specified module 'success' job 'test' is not registered."}
+FUNCTION_RESULT_END
+`,
+ }
},
},
+ "[update] enabled dyncfg:ok with dyncfg:ok": {
+ createSim: func() *runSim {
+ origCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1")
+ updCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "2")
+ origBs, _ := json.Marshal(origCfg)
+ updBs, _ := json.Marshal(updCfg)
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()},
+ Payload: origBs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-enable",
+ Args: []string{dyncfgJobID(origCfg), "enable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-update",
+ Args: []string{dyncfgJobID(origCfg), "update"},
+ Payload: updBs,
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: updCfg, status: dyncfgRunning},
+ },
+ wantExposed: []seenConfig{
+ {cfg: updCfg, status: dyncfgRunning},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+
+FUNCTION_RESULT_BEGIN 3-update 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status running
+`,
+ }
+ },
+ },
+ "[update] disabled dyncfg:ok with dyncfg:ok": {
+ createSim: func() *runSim {
+ origCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "1")
+ updCfg := prepareDyncfgCfg("success", "test").
+ Set("option_str", "2")
+ origBs, _ := json.Marshal(origCfg)
+ updBs, _ := json.Marshal(updCfg)
+
+ return &runSim{
+ do: func(mgr *Manager) {
+ mgr.dyncfgConfig(functions.Function{
+ UID: "1-add",
+ Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()},
+ Payload: origBs,
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "2-disable",
+ Args: []string{dyncfgJobID(origCfg), "disable"},
+ })
+ mgr.dyncfgConfig(functions.Function{
+ UID: "3-update",
+ Args: []string{dyncfgJobID(origCfg), "update"},
+ Payload: updBs,
+ })
+ },
+ wantDiscovered: nil,
+ wantSeen: []seenConfig{
+ {cfg: updCfg, status: dyncfgDisabled},
+ },
+ wantExposed: []seenConfig{
+ {cfg: updCfg, status: dyncfgDisabled},
+ },
+ wantRunning: nil,
+ wantDyncfg: `
+FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status accepted
+
+FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+
+FUNCTION_RESULT_BEGIN 3-update 200 application/json 200
+{"status":200,"message":""}
+FUNCTION_RESULT_END
+
+CONFIG go.d:collector:success:jobs:test status disabled
+`,
+ }
+ },
+ },
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ sim := test.createSim()
+ sim.run(t)
+ })
}
- var buf bytes.Buffer
- mgr := NewManager()
- mgr.Modules = prepareMockRegistry()
- mgr.Out = safewriter.New(&buf)
- mgr.PluginName = "test.plugin"
-
- ctx, cancel := context.WithCancel(context.Background())
- in := make(chan []*confgroup.Group)
- var wg sync.WaitGroup
-
- wg.Add(1)
- go func() { defer wg.Done(); mgr.Run(ctx, in) }()
-
- select {
- case in <- groups:
- case <-time.After(time.Second * 2):
+}
+
+func prepareCfgGroup(src, srcType string, configs ...confgroup.Config) *confgroup.Group {
+ return &confgroup.Group{
+ Configs: configs,
+ Source: src,
+ SourceType: srcType,
}
+}
- time.Sleep(time.Second * 5)
- cancel()
- wg.Wait()
+func prepareStockCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeStock).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=stock,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
+
+func prepareUserCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeUser).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=user,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
+}
- assert.True(t, buf.String() != "")
+func prepareDiscoveredCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeDiscovered).
+ SetProvider("test").
+ SetSource(fmt.Sprintf("type=discovered,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
}
-func prepareMockRegistry() module.Registry {
- reg := module.Registry{}
- reg.Register("success", module.Creator{
- Create: func() module.Module {
- return &module.MockModule{
- InitFunc: func() bool { return true },
- CheckFunc: func() bool { return true },
- ChartsFunc: func() *module.Charts {
- return &module.Charts{
- &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}},
- }
- },
- CollectFunc: func() map[string]int64 {
- return map[string]int64{"id1": 1}
- },
- }
- },
- })
- reg.Register("fail", module.Creator{
- Create: func() module.Module {
- return &module.MockModule{
- InitFunc: func() bool { return false },
- }
- },
- })
- return reg
+func prepareDyncfgCfg(module, job string) confgroup.Config {
+ return confgroup.Config{}.
+ SetSourceType(confgroup.TypeDyncfg).
+ SetProvider("dyncfg").
+ SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, job)).
+ SetModule(module).
+ SetName(job)
}
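
A note on the IDs exercised above: `dyncfgModID` and `dyncfgJobID` are not part of this diff. Judging purely by the expected `CONFIG go.d:collector:<module>:jobs:<job>` lines, they appear to build IDs as sketched below (hypothetical names; the real helpers are defined elsewhere in the jobmgr package).

package jobmgr

import (
	"fmt"

	"github.com/netdata/go.d.plugin/agent/confgroup"
)

// Hypothetical reimplementations, inferred only from the expected CONFIG
// lines in the tests above; not the actual helpers.
func sketchDyncfgModID(module string) string {
	return fmt.Sprintf("go.d:collector:%s", module)
}

func sketchDyncfgJobID(cfg confgroup.Config) string {
	return fmt.Sprintf("go.d:collector:%s:jobs:%s", cfg.Module(), cfg.Name())
}
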
diff --git a/agent/jobmgr/noop.go b/agent/jobmgr/noop.go
index 15883105d..4c2801c24 100644
--- a/agent/jobmgr/noop.go
+++ b/agent/jobmgr/noop.go
@@ -3,18 +3,19 @@
package jobmgr
import (
+ "github.com/netdata/go.d.plugin/agent/functions"
+
"github.com/netdata/go.d.plugin/agent/confgroup"
"github.com/netdata/go.d.plugin/agent/vnodes"
)
type noop struct{}
-func (n noop) Lock(string) (bool, error) { return true, nil }
-func (n noop) Unlock(string) error { return nil }
-func (n noop) Save(confgroup.Config, string) {}
-func (n noop) Remove(confgroup.Config) {}
-func (n noop) Contains(confgroup.Config, ...string) bool { return false }
-func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false }
-func (n noop) Register(confgroup.Config) { return }
-func (n noop) Unregister(confgroup.Config) { return }
-func (n noop) UpdateStatus(confgroup.Config, string, string) { return }
+func (n noop) Lock(string) (bool, error) { return true, nil }
+func (n noop) Unlock(string) error { return nil }
+func (n noop) Save(confgroup.Config, string) {}
+func (n noop) Remove(confgroup.Config) {}
+func (n noop) Contains(confgroup.Config, ...string) bool { return false }
+func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false }
+func (n noop) Register(name string, reg func(functions.Function)) {}
+func (n noop) Unregister(name string) {}
diff --git a/agent/jobmgr/run.go b/agent/jobmgr/run.go
deleted file mode 100644
index f1a14cadc..000000000
--- a/agent/jobmgr/run.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package jobmgr
-
-import (
- "context"
- "slices"
- "time"
-
- "github.com/netdata/go.d.plugin/agent/ticker"
-)
-
-func (m *Manager) runRunningJobsHandling(ctx context.Context) {
- tk := ticker.New(time.Second)
- defer tk.Stop()
-
- for {
- select {
- case <-ctx.Done():
- return
- case clock := <-tk.C:
- //m.Debugf("tick %d", clock)
- m.notifyRunningJobs(clock)
- }
- }
-}
-
-func (m *Manager) notifyRunningJobs(clock int) {
- m.queueMux.Lock()
- defer m.queueMux.Unlock()
-
- for _, v := range m.queue {
- v.Tick(clock)
- }
-}
-
-func (m *Manager) startJob(job Job) {
- m.queueMux.Lock()
- defer m.queueMux.Unlock()
-
- go job.Start()
-
- m.queue = append(m.queue, job)
-}
-
-func (m *Manager) stopJob(name string) {
- m.queueMux.Lock()
- defer m.queueMux.Unlock()
-
- idx := slices.IndexFunc(m.queue, func(job Job) bool {
- return job.FullName() == name
- })
-
- if idx != -1 {
- j := m.queue[idx]
- j.Stop()
-
- copy(m.queue[idx:], m.queue[idx+1:])
- m.queue[len(m.queue)-1] = nil
- m.queue = m.queue[:len(m.queue)-1]
- }
-}
-
-func (m *Manager) stopRunningJobs() {
- m.queueMux.Lock()
- defer m.queueMux.Unlock()
-
- for i, v := range m.queue {
- v.Stop()
- m.queue[i] = nil
- }
- m.queue = m.queue[:0]
-}
diff --git a/agent/jobmgr/sim_test.go b/agent/jobmgr/sim_test.go
new file mode 100644
index 000000000..870f19d7c
--- /dev/null
+++ b/agent/jobmgr/sim_test.go
@@ -0,0 +1,131 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package jobmgr
+
+import (
+ "bytes"
+ "context"
+ "errors"
+ "fmt"
+ "slices"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/netdata/go.d.plugin/agent/confgroup"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/agent/netdataapi"
+ "github.com/netdata/go.d.plugin/agent/safewriter"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type runSim struct {
+ do func(mgr *Manager)
+
+ wantDiscovered []confgroup.Config
+ wantSeen []seenConfig
+ wantExposed []seenConfig
+ wantRunning []string
+ wantDyncfg string
+}
+
+func (s *runSim) run(t *testing.T) {
+ t.Helper()
+
+ require.NotNil(t, s.do, "s.do is nil")
+
+ var buf bytes.Buffer
+ mgr := New()
+ mgr.api = netdataapi.New(safewriter.New(&buf))
+ mgr.Modules = prepareMockRegistry()
+
+ done := make(chan struct{})
+ grpCh := make(chan []*confgroup.Group)
+ ctx, cancel := context.WithCancel(context.Background())
+
+ go func() { defer close(done); close(grpCh); mgr.Run(ctx, grpCh) }()
+
+ timeout := time.Second * 5
+
+ select {
+ case <-mgr.started:
+ case <-time.After(timeout):
+ t.Errorf("failed to start work in %s", timeout)
+ }
+
+ s.do(mgr)
+ cancel()
+
+ select {
+ case <-done:
+ case <-time.After(timeout):
+ t.Errorf("failed to finish work in %s", timeout)
+ }
+
+ parts := strings.Split(buf.String(), "\n")
+ parts = slices.DeleteFunc(parts, func(s string) bool {
+ return strings.HasPrefix(s, "CONFIG") && strings.Contains(s, " template ")
+ })
+
+ wantDyncfg, gotDyncfg := strings.TrimSpace(s.wantDyncfg), strings.TrimSpace(strings.Join(parts, "\n"))
+
+ assert.Equal(t, wantDyncfg, gotDyncfg, "dyncfg commands")
+
+ var n int
+ for _, cfgs := range mgr.discoveredConfigs.items {
+ n += len(cfgs)
+ }
+
+ require.Len(t, s.wantDiscovered, n, "discoveredConfigs: different len")
+
+ for _, cfg := range s.wantDiscovered {
+ cfgs, ok := mgr.discoveredConfigs.items[cfg.Source()]
+ require.Truef(t, ok, "discoveredConfigs: source %s is not found", cfg.Source())
+ _, ok = cfgs[cfg.Hash()]
+ require.Truef(t, ok, "discoveredConfigs: source %s config %d is not found", cfg.Source(), cfg.Hash())
+ }
+
+ require.Len(t, s.wantSeen, len(mgr.seenConfigs.items), "seenConfigs: different len")
+
+ for _, scfg := range s.wantSeen {
+ v, ok := mgr.seenConfigs.lookup(scfg.cfg)
+ require.Truef(t, ok, "seenConfigs: config '%s' is not found", scfg.cfg.UID())
+ require.Truef(t, scfg.status == v.status, "seenConfigs: wrong status, want %s got %s", scfg.status, v.status)
+ }
+
+ require.Len(t, s.wantExposed, len(mgr.exposedConfigs.items), "exposedConfigs: different len")
+
+ for _, scfg := range s.wantExposed {
+ v, ok := mgr.exposedConfigs.lookup(scfg.cfg)
+ require.Truef(t, ok && scfg.cfg.UID() == v.cfg.UID(), "exposedConfigs: config '%s' is not found", scfg.cfg.UID())
+ require.Truef(t, scfg.status == v.status, "exposedConfigs: wrong status, want %s got %s", scfg.status, v.status)
+ }
+}
+
+func prepareMockRegistry() module.Registry {
+ reg := module.Registry{}
+
+ reg.Register("success", module.Creator{
+ JobConfigSchema: module.MockConfigSchema,
+ Create: func() module.Module {
+ return &module.MockModule{
+ ChartsFunc: func() *module.Charts {
+ return &module.Charts{&module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}}
+ },
+ CollectFunc: func() map[string]int64 { return map[string]int64{"id1": 1} },
+ }
+ },
+ })
+ reg.Register("fail", module.Creator{
+ Create: func() module.Module {
+ return &module.MockModule{
+ InitFunc: func() error { return errors.New("mock failed init") },
+ }
+ },
+ })
+
+ return reg
+}
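
For reference, plugging a new scenario into the dyncfg test tables above needs only a `do` callback plus the expected state and wire output. A hypothetical drop-in entry (module 'success', job 'example'); its expected output follows mechanically from the '[add] dyncfg:ok' case:

"[add] example": {
	createSim: func() *runSim {
		cfg := prepareDyncfgCfg("success", "example")

		return &runSim{
			do: func(mgr *Manager) {
				mgr.dyncfgConfig(functions.Function{
					UID:     "1-add",
					Args:    []string{dyncfgModID(cfg.Module()), "add", cfg.Name()},
					Payload: []byte("{}"),
				})
			},
			wantSeen:    []seenConfig{{cfg: cfg, status: dyncfgAccepted}},
			wantExposed: []seenConfig{{cfg: cfg, status: dyncfgAccepted}},
			wantDyncfg: `
FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
{"status":200,"message":""}
FUNCTION_RESULT_END

CONFIG go.d:collector:success:jobs:example status accepted
`,
		}
	},
},
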
diff --git a/agent/module/job.go b/agent/module/job.go
index 6200ff9f5..b9b41f03f 100644
--- a/agent/module/job.go
+++ b/agent/module/job.go
@@ -4,6 +4,7 @@ package module
import (
"bytes"
+ "errors"
"fmt"
"io"
"log/slog"
@@ -85,6 +86,10 @@ const (
func NewJob(cfg JobConfig) *Job {
var buf bytes.Buffer
+ if cfg.UpdateEvery == 0 {
+ cfg.UpdateEvery = 1
+ }
+
j := &Job{
AutoDetectEvery: cfg.AutoDetectEvery,
AutoDetectTries: infTries,
@@ -167,40 +172,44 @@ type Job struct {
const NetdataChartIDMaxLength = 1000
// FullName returns job full name.
-func (j Job) FullName() string {
+func (j *Job) FullName() string {
return j.fullName
}
// ModuleName returns job module name.
-func (j Job) ModuleName() string {
+func (j *Job) ModuleName() string {
return j.moduleName
}
// Name returns job name.
-func (j Job) Name() string {
+func (j *Job) Name() string {
return j.name
}
// Panicked returns 'panicked' flag value.
-func (j Job) Panicked() bool {
+func (j *Job) Panicked() bool {
return j.panicked
}
// AutoDetectionEvery returns value of AutoDetectEvery.
-func (j Job) AutoDetectionEvery() int {
+func (j *Job) AutoDetectionEvery() int {
return j.AutoDetectEvery
}
// RetryAutoDetection returns whether it is needed to retry autodetection.
-func (j Job) RetryAutoDetection() bool {
+func (j *Job) RetryAutoDetection() bool {
return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0)
}
+func (j *Job) Configuration() any {
+ return j.module.Configuration()
+}
+
// AutoDetection invokes init, check and postCheck. It handles panic.
-func (j *Job) AutoDetection() (ok bool) {
+func (j *Job) AutoDetection() (err error) {
defer func() {
if r := recover(); r != nil {
- ok = false
+			err = fmt.Errorf("panic %v", r)
j.panicked = true
j.disableAutoDetection()
@@ -209,7 +218,7 @@ func (j *Job) AutoDetection() (ok bool) {
j.Errorf("STACK: %s", debug.Stack())
}
}
- if !ok {
+ if err != nil {
j.module.Cleanup()
}
}()
@@ -218,29 +227,29 @@ func (j *Job) AutoDetection() (ok bool) {
j.Mute()
}
- if ok = j.init(); !ok {
+ if err = j.init(); err != nil {
j.Error("init failed")
j.Unmute()
j.disableAutoDetection()
- return
+ return err
}
- if ok = j.check(); !ok {
+ if err = j.check(); err != nil {
j.Error("check failed")
j.Unmute()
- return
+ return err
}
j.Unmute()
-
j.Info("check success")
- if ok = j.postCheck(); !ok {
+
+ if err = j.postCheck(); err != nil {
j.Error("postCheck failed")
j.disableAutoDetection()
- return
+ return err
}
- return true
+ return nil
}
// Tick Tick.
@@ -316,34 +325,40 @@ func (j *Job) Cleanup() {
}
}
-func (j *Job) init() bool {
+func (j *Job) init() error {
if j.initialized {
- return true
+ return nil
+ }
+
+ if err := j.module.Init(); err != nil {
+ return err
}
- j.initialized = j.module.Init()
+ j.initialized = true
- return j.initialized
+ return nil
}
-func (j *Job) check() bool {
- ok := j.module.Check()
- if !ok && j.AutoDetectTries != infTries {
- j.AutoDetectTries--
+func (j *Job) check() error {
+ if err := j.module.Check(); err != nil {
+ if j.AutoDetectTries != infTries {
+ j.AutoDetectTries--
+ }
+ return err
}
- return ok
+ return nil
}
-func (j *Job) postCheck() bool {
+func (j *Job) postCheck() error {
if j.charts = j.module.Charts(); j.charts == nil {
j.Error("nil charts")
- return false
+ return errors.New("nil charts")
}
if err := checkCharts(*j.charts...); err != nil {
j.Errorf("charts check: %v", err)
- return false
+ return err
}
- return true
+ return nil
}
func (j *Job) runOnce() {
@@ -562,7 +577,7 @@ func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun
return chart.updated
}
-func (j Job) penalty() int {
+func (j *Job) penalty() int {
v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2
if v > maxPenalty {
return maxPenalty
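
With `AutoDetection` now returning an error, callers can propagate the failure reason instead of a bare bool. A hypothetical call site (sketch only; it assumes `Start` is the run loop that the removed run.go launched via `go job.Start()`):

package example

import (
	"fmt"

	"github.com/netdata/go.d.plugin/agent/module"
)

// startIfDetected is a hypothetical helper: start the job only when
// autodetection succeeds, otherwise surface the reason to the caller.
func startIfDetected(j *module.Job) error {
	if err := j.AutoDetection(); err != nil {
		return fmt.Errorf("job %s: autodetection failed: %w", j.FullName(), err)
	}
	go j.Start() // the job's run loop, as the removed run.go started queued jobs
	return nil
}
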
diff --git a/agent/module/job_test.go b/agent/module/job_test.go
index f19fdcebd..c87f840d5 100644
--- a/agent/module/job_test.go
+++ b/agent/module/job_test.go
@@ -3,6 +3,7 @@
package module
import (
+ "errors"
"fmt"
"io"
"testing"
@@ -72,10 +73,10 @@ func TestJob_AutoDetectionEvery(t *testing.T) {
func TestJob_RetryAutoDetection(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return true
+ InitFunc: func() error {
+ return nil
},
- CheckFunc: func() bool { return false },
+ CheckFunc: func() error { return errors.New("check error") },
ChartsFunc: func() *Charts {
return &Charts{}
},
@@ -86,14 +87,14 @@ func TestJob_RetryAutoDetection(t *testing.T) {
assert.True(t, job.RetryAutoDetection())
assert.Equal(t, infTries, job.AutoDetectTries)
for i := 0; i < 1000; i++ {
- job.check()
+ _ = job.check()
}
assert.True(t, job.RetryAutoDetection())
assert.Equal(t, infTries, job.AutoDetectTries)
job.AutoDetectTries = 10
for i := 0; i < 10; i++ {
- job.check()
+ _ = job.check()
}
assert.False(t, job.RetryAutoDetection())
assert.Equal(t, 0, job.AutoDetectTries)
@@ -103,13 +104,13 @@ func TestJob_AutoDetection(t *testing.T) {
job := newTestJob()
var v int
m := &MockModule{
- InitFunc: func() bool {
+ InitFunc: func() error {
v++
- return true
+ return nil
},
- CheckFunc: func() bool {
+ CheckFunc: func() error {
v++
- return true
+ return nil
},
ChartsFunc: func() *Charts {
v++
@@ -118,47 +119,47 @@ func TestJob_AutoDetection(t *testing.T) {
}
job.module = m
- assert.True(t, job.AutoDetection())
+ assert.NoError(t, job.AutoDetection())
assert.Equal(t, 3, v)
}
func TestJob_AutoDetection_FailInit(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return false
+ InitFunc: func() error {
+ return errors.New("init error")
},
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
func TestJob_AutoDetection_FailCheck(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return true
+ InitFunc: func() error {
+ return nil
},
- CheckFunc: func() bool {
- return false
+ CheckFunc: func() error {
+ return errors.New("check error")
},
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
func TestJob_AutoDetection_FailPostCheck(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return true
+ InitFunc: func() error {
+ return nil
},
- CheckFunc: func() bool {
- return true
+ CheckFunc: func() error {
+ return nil
},
ChartsFunc: func() *Charts {
return nil
@@ -166,47 +167,47 @@ func TestJob_AutoDetection_FailPostCheck(t *testing.T) {
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
func TestJob_AutoDetection_PanicInit(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
+ InitFunc: func() error {
panic("panic in Init")
},
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
func TestJob_AutoDetection_PanicCheck(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return true
+ InitFunc: func() error {
+ return nil
},
- CheckFunc: func() bool {
+ CheckFunc: func() error {
panic("panic in Check")
},
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
func TestJob_AutoDetection_PanicPostCheck(t *testing.T) {
job := newTestJob()
m := &MockModule{
- InitFunc: func() bool {
- return true
+ InitFunc: func() error {
+ return nil
},
- CheckFunc: func() bool {
- return true
+ CheckFunc: func() error {
+ return nil
},
ChartsFunc: func() *Charts {
panic("panic in PostCheck")
@@ -214,7 +215,7 @@ func TestJob_AutoDetection_PanicPostCheck(t *testing.T) {
}
job.module = m
- assert.False(t, job.AutoDetection())
+ assert.Error(t, job.AutoDetection())
assert.True(t, m.CleanupDone)
}
diff --git a/agent/module/mock.go b/agent/module/mock.go
index c4353eb52..65b93debf 100644
--- a/agent/module/mock.go
+++ b/agent/module/mock.go
@@ -2,12 +2,40 @@
package module
+const MockConfigSchema = `
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "option_str": {
+ "type": "string",
+ "description": "Option string value"
+ },
+ "option_int": {
+ "type": "integer",
+ "description": "Option integer value"
+ }
+ },
+ "required": [
+ "option_str",
+ "option_int"
+ ]
+}
+`
+
+type MockConfiguration struct {
+ OptionStr string `yaml:"option_str" json:"option_str"`
+ OptionInt int `yaml:"option_int" json:"option_int"`
+}
+
// MockModule MockModule.
type MockModule struct {
Base
- InitFunc func() bool
- CheckFunc func() bool
+ Config MockConfiguration `yaml:",inline" json:",inline"`
+
+ InitFunc func() error
+ CheckFunc func() error
ChartsFunc func() *Charts
CollectFunc func() map[string]int64
CleanupFunc func()
@@ -15,23 +43,23 @@ type MockModule struct {
}
// Init invokes InitFunc.
-func (m MockModule) Init() bool {
+func (m *MockModule) Init() error {
if m.InitFunc == nil {
- return true
+ return nil
}
return m.InitFunc()
}
// Check invokes CheckFunc.
-func (m MockModule) Check() bool {
+func (m *MockModule) Check() error {
if m.CheckFunc == nil {
- return true
+ return nil
}
return m.CheckFunc()
}
// Charts invokes ChartsFunc.
-func (m MockModule) Charts() *Charts {
+func (m *MockModule) Charts() *Charts {
if m.ChartsFunc == nil {
return nil
}
@@ -39,7 +67,7 @@ func (m MockModule) Charts() *Charts {
}
// Collect invokes CollectDunc.
-func (m MockModule) Collect() map[string]int64 {
+func (m *MockModule) Collect() map[string]int64 {
if m.CollectFunc == nil {
return nil
}
@@ -53,3 +81,7 @@ func (m *MockModule) Cleanup() {
}
m.CleanupDone = true
}
+
+func (m *MockModule) Configuration() any {
+ return m.Config
+}
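
`Configuration()` is what the dyncfg `get` path serializes, and for the mock it returns the inline `Config`; that is why the '[get] existing' test above expects `{"option_str":"1","option_int":1}`. A minimal standalone sketch:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/netdata/go.d.plugin/agent/module"
)

func main() {
	// the dyncfg "get" handler marshals whatever Configuration() returns
	m := &module.MockModule{
		Config: module.MockConfiguration{OptionStr: "1", OptionInt: 1},
	}

	bs, err := json.Marshal(m.Configuration())
	if err != nil {
		panic(err)
	}

	fmt.Println(string(bs)) // {"option_str":"1","option_int":1}
}
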
diff --git a/agent/module/mock_test.go b/agent/module/mock_test.go
index 9c194e893..d7521911f 100644
--- a/agent/module/mock_test.go
+++ b/agent/module/mock_test.go
@@ -12,17 +12,17 @@ import (
func TestMockModule_Init(t *testing.T) {
m := &MockModule{}
- assert.True(t, m.Init())
- m.InitFunc = func() bool { return false }
- assert.False(t, m.Init())
+ assert.NoError(t, m.Init())
+ m.InitFunc = func() error { return nil }
+ assert.NoError(t, m.Init())
}
func TestMockModule_Check(t *testing.T) {
m := &MockModule{}
- assert.True(t, m.Check())
- m.CheckFunc = func() bool { return false }
- assert.False(t, m.Check())
+ assert.NoError(t, m.Check())
+ m.CheckFunc = func() error { return nil }
+ assert.NoError(t, m.Check())
}
func TestMockModule_Charts(t *testing.T) {
diff --git a/agent/module/module.go b/agent/module/module.go
index 3421a02ee..ff7a06a44 100644
--- a/agent/module/module.go
+++ b/agent/module/module.go
@@ -3,21 +3,27 @@
package module
import (
+ "encoding/json"
+ "testing"
+
"github.com/netdata/go.d.plugin/logger"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "gopkg.in/yaml.v2"
)
// Module is an interface that represents a module.
type Module interface {
// Init does initialization.
- // If it returns false, the job will be disabled.
- Init() bool
+ // If it returns error, the job will be disabled.
+ Init() error
// Check is called after Init.
- // If it returns false, the job will be disabled.
- Check() bool
+ // If it returns error, the job will be disabled.
+ Check() error
// Charts returns the chart definition.
- // Make sure not to share returned instance.
Charts() *Charts
// Collect collects metrics.
@@ -27,6 +33,8 @@ type Module interface {
Cleanup()
GetBase() *Base
+
+ Configuration() any
}
// Base is a helper struct. All modules should embed this struct.
@@ -35,3 +43,35 @@ type Base struct {
}
func (b *Base) GetBase() *Base { return b }
+
+func TestConfigurationSerialize(t *testing.T, mod Module, cfgJSON, cfgYAML []byte) {
+ t.Helper()
+ tests := map[string]struct {
+ config []byte
+ unmarshal func(in []byte, out interface{}) (err error)
+ marshal func(in interface{}) (out []byte, err error)
+ }{
+ "json": {config: cfgJSON, marshal: json.Marshal, unmarshal: json.Unmarshal},
+ "yaml": {config: cfgYAML, marshal: yaml.Marshal, unmarshal: yaml.Unmarshal},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+
+ require.NoError(t, test.unmarshal(test.config, mod), "unmarshal test->mod")
+ bs, err := test.marshal(mod.Configuration())
+ require.NoError(t, err, "marshal mod config")
+
+ var want map[string]any
+ var got map[string]any
+
+ require.NoError(t, test.unmarshal(test.config, &want), "unmarshal test->map")
+ require.NoError(t, test.unmarshal(bs, &got), "unmarshal mod->map")
+
+ require.NotNil(t, want, "want map")
+ require.NotNil(t, got, "got map")
+
+ assert.Equal(t, want, got)
+ })
+ }
+}
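
A hypothetical per-module usage of the new shared helper (illustrative names only; a real collector would pass its own Module implementation and config documents):

package example

import (
	"testing"

	"github.com/netdata/go.d.plugin/agent/module"
)

// exampleConfig/exampleModule are illustrative stand-ins for a real collector.
type exampleConfig struct {
	OptionStr string `yaml:"option_str" json:"option_str"`
	OptionInt int    `yaml:"option_int" json:"option_int"`
}

type exampleModule struct {
	module.Base
	exampleConfig `yaml:",inline" json:""`
}

func (m *exampleModule) Init() error               { return nil }
func (m *exampleModule) Check() error              { return nil }
func (m *exampleModule) Charts() *module.Charts    { return &module.Charts{} }
func (m *exampleModule) Collect() map[string]int64 { return nil }
func (m *exampleModule) Cleanup()                  {}
func (m *exampleModule) Configuration() any        { return m.exampleConfig }

func TestExampleModule_ConfigurationSerialize(t *testing.T) {
	module.TestConfigurationSerialize(
		t,
		&exampleModule{},
		[]byte(`{"option_str": "s", "option_int": 1}`),
		[]byte("option_str: s\noption_int: 1"),
	)
}
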
diff --git a/agent/module/registry.go b/agent/module/registry.go
index 4d0d2c493..f2fa661c1 100644
--- a/agent/module/registry.go
+++ b/agent/module/registry.go
@@ -44,3 +44,8 @@ func (r Registry) Register(name string, creator Creator) {
}
r[name] = creator
}
+
+func (r Registry) Lookup(name string) (Creator, bool) {
+ v, ok := r[name]
+ return v, ok
+}
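
A sketch of the intended call site for `Lookup` (hypothetical helper; it resolves a collector by name instead of indexing the Registry map directly):

package example

import (
	"fmt"

	"github.com/netdata/go.d.plugin/agent/module"
)

// createModule resolves a collector by name before building a job.
func createModule(reg module.Registry, name string) (module.Module, error) {
	creator, ok := reg.Lookup(name)
	if !ok {
		return nil, fmt.Errorf("module %q is not registered", name)
	}
	return creator.Create(), nil
}
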
diff --git a/agent/netdataapi/api.go b/agent/netdataapi/api.go
index 43c34d22d..b9ade50a8 100644
--- a/agent/netdataapi/api.go
+++ b/agent/netdataapi/api.go
@@ -165,52 +165,50 @@ func (a *API) HOSTDEFINEEND() error {
}
func (a *API) HOST(guid string) error {
- _, err := a.Write([]byte("HOST " + "'" + guid + "'" + "\n\n"))
+	_, err := a.Write([]byte("HOST '" + guid + "'\n\n"))
return err
}
-func (a *API) DynCfgEnable(pluginName string) error {
- _, err := a.Write([]byte("DYNCFG_ENABLE '" + pluginName + "'\n\n"))
- return err
-}
+func (a *API) FUNCRESULT(uid, contentType, payload, code string) {
+ var buf bytes.Buffer
-func (a *API) DynCfgReset() error {
- _, err := a.Write([]byte("DYNCFG_RESET\n"))
- return err
-}
+ buf.WriteString("FUNCTION_RESULT_BEGIN " +
+ uid + " " +
+ code + " " +
+ contentType + " " +
+ code + "\n",
+ )
-func (a *API) DyncCfgRegisterModule(moduleName string) error {
- _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_MODULE '%s' job_array\n\n", moduleName)
- return err
-}
+ if payload != "" {
+ buf.WriteString(payload + "\n")
+ }
-func (a *API) DynCfgRegisterJob(moduleName, jobName, jobType string) error {
- _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_JOB '%s' '%s' '%s' 0\n\n", moduleName, jobName, jobType)
- return err
-}
+ buf.WriteString("FUNCTION_RESULT_END\n\n")
-func (a *API) DynCfgReportJobStatus(moduleName, jobName, status, reason string) error {
- _, err := fmt.Fprintf(a, "REPORT_JOB_STATUS '%s' '%s' '%s' 0 '%s'\n\n", moduleName, jobName, status, reason)
- return err
+ _, _ = buf.WriteTo(a)
}
-func (a *API) FunctionResultSuccess(uid, contentType, payload string) error {
- return a.functionResult(uid, contentType, payload, "1")
-}
+func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) {
+ // https://learn.netdata.cloud/docs/contributing/external-plugins/#config
-func (a *API) FunctionResultReject(uid, contentType, payload string) error {
- return a.functionResult(uid, contentType, payload, "0")
+ _, _ = a.Write([]byte("CONFIG " +
+ id + " " +
+ "create" + " " +
+ status + " " +
+ configType + " " +
+ path + " " +
+ sourceType + " '" +
+ source + "' '" +
+ supportedCommands + "' 0x0000 0x0000\n\n",
+ ))
+ // supportedCommands + "' 0x7ff 0x7ff\n",
}
-func (a *API) functionResult(uid, contentType, payload, code string) error {
- var buf bytes.Buffer
-
- buf.WriteString("FUNCTION_RESULT_BEGIN " + uid + " " + code + " " + contentType + " 0\n")
- if payload != "" {
- buf.WriteString(payload + "\n")
- }
- buf.WriteString("FUNCTION_RESULT_END\n\n")
+func (a *API) CONFIGDELETE(id string) {
+ _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n"))
+}
- _, err := buf.WriteTo(a)
- return err
+func (a *API) CONFIGSTATUS(id, status string) {
+ _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n"))
}
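
For illustration, this is the wire output the new methods produce, matching the expectations in the jobmgr tests above (assuming `netdataapi.New` wraps a writer, as sim_test.go uses it):

package main

import (
	"bytes"
	"fmt"

	"github.com/netdata/go.d.plugin/agent/netdataapi"
	"github.com/netdata/go.d.plugin/agent/safewriter"
)

func main() {
	var buf bytes.Buffer
	a := netdataapi.New(safewriter.New(&buf))

	a.FUNCRESULT("1-add", "application/json", `{"status":200,"message":""}`, "200")
	a.CONFIGSTATUS("go.d:collector:success:jobs:test", "accepted")
	a.CONFIGDELETE("go.d:collector:success:jobs:test")

	fmt.Print(buf.String())
	// FUNCTION_RESULT_BEGIN 1-add 200 application/json 200
	// {"status":200,"message":""}
	// FUNCTION_RESULT_END
	//
	// CONFIG go.d:collector:success:jobs:test status accepted
	//
	// CONFIG go.d:collector:success:jobs:test delete
}
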
diff --git a/agent/netdataapi/api_test.go b/agent/netdataapi/api_test.go
index 30f019460..e5087839b 100644
--- a/agent/netdataapi/api_test.go
+++ b/agent/netdataapi/api_test.go
@@ -260,101 +260,6 @@ HOST_DEFINE_END
)
}
-func TestAPI_DynCfgEnable(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.DynCfgEnable("plugin")
-
- assert.Equal(
- t,
- "DYNCFG_ENABLE 'plugin'\n\n",
- buf.String(),
- )
-}
-
-func TestAPI_DynCfgReset(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.DynCfgReset()
-
- assert.Equal(
- t,
- "DYNCFG_RESET\n",
- buf.String(),
- )
-}
-
-func TestAPI_DyncCfgRegisterModule(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.DyncCfgRegisterModule("module")
-
- assert.Equal(
- t,
- "DYNCFG_REGISTER_MODULE 'module' job_array\n\n",
- buf.String(),
- )
-}
-
-func TestAPI_DynCfgRegisterJob(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
+func TestAPI_FUNCRESULT(t *testing.T) {
+	buf := &bytes.Buffer{}
+	a := API{Writer: buf}
+
+	a.FUNCRESULT("uid", "contentType", "payload", "code")
+
+	assert.Equal(
+		t,
+		`FUNCTION_RESULT_BEGIN uid code contentType code
+payload
+FUNCTION_RESULT_END
+
+`,
+		buf.String(),
+	)
- _ = a.DynCfgRegisterJob("module", "job", "type")
-
- assert.Equal(
- t,
- "DYNCFG_REGISTER_JOB 'module' 'job' 'type' 0\n\n",
- buf.String(),
- )
-}
-
-func TestAPI_DynCfgReportJobStatus(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.DynCfgReportJobStatus("module", "job", "status", "reason")
-
- assert.Equal(
- t,
- "REPORT_JOB_STATUS 'module' 'job' 'status' 0 'reason'\n\n",
- buf.String(),
- )
-}
-
-func TestAPI_FunctionResultSuccess(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.FunctionResultSuccess("uid", "contentType", "payload")
-
- assert.Equal(
- t,
- `FUNCTION_RESULT_BEGIN uid 1 contentType 0
-payload
-FUNCTION_RESULT_END
-
-`,
- buf.String(),
- )
-}
-
-func TestAPI_FunctionResultReject(t *testing.T) {
- buf := &bytes.Buffer{}
- a := API{Writer: buf}
-
- _ = a.FunctionResultReject("uid", "contentType", "payload")
-
- assert.Equal(
- t,
- `FUNCTION_RESULT_BEGIN uid 0 contentType 0
-payload
-FUNCTION_RESULT_END
-
-`,
- buf.String(),
- )
}
diff --git a/agent/setup.go b/agent/setup.go
index 202eedbb2..57f305298 100644
--- a/agent/setup.go
+++ b/agent/setup.go
@@ -11,6 +11,7 @@ import (
"github.com/netdata/go.d.plugin/agent/discovery"
"github.com/netdata/go.d.plugin/agent/discovery/dummy"
"github.com/netdata/go.d.plugin/agent/discovery/file"
+ "github.com/netdata/go.d.plugin/agent/discovery/sd"
"github.com/netdata/go.d.plugin/agent/hostinfo"
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/agent/vnodes"
@@ -144,6 +145,7 @@ func (a *Agent) buildDiscoveryConf(enabled module.Registry) discovery.Config {
}
a.Infof("dummy/read/watch paths: %d/%d/%d", len(dummyPaths), len(readPaths), len(a.ModulesSDConfPath))
+
return discovery.Config{
Registry: reg,
File: file.Config{
@@ -153,6 +155,9 @@ func (a *Agent) buildDiscoveryConf(enabled module.Registry) discovery.Config {
Dummy: dummy.Config{
Names: dummyPaths,
},
+ SD: sd.Config{
+ ConfDir: a.ModulesConfSDDir,
+ },
}
}
@@ -174,7 +179,7 @@ func (a *Agent) setupVnodeRegistry() *vnodes.Vnodes {
return reg
}
-func loadYAML(conf interface{}, path string) error {
+func loadYAML(conf any, path string) error {
f, err := os.Open(path)
if err != nil {
return err
diff --git a/cmd/godplugin/main.go b/cmd/godplugin/main.go
index 87a0a5102..6f7e9d1b6 100644
--- a/cmd/godplugin/main.go
+++ b/cmd/godplugin/main.go
@@ -83,6 +83,13 @@ func modulesConfDir(opts *cli.Option) (mpath multipath.MultiPath) {
)
}
+func modulesConfSDDir(confDir multipath.MultiPath) (mpath multipath.MultiPath) {
+ for _, v := range confDir {
+ mpath = append(mpath, filepath.Join(v, "sd"))
+ }
+ return mpath
+}
+
func watchPaths(opts *cli.Option) []string {
if watchPath == "" {
return opts.WatchPath
@@ -120,16 +127,19 @@ func main() {
logger.Level.Set(slog.LevelDebug)
}
+ dir := modulesConfDir(opts)
+
a := agent.New(agent.Config{
- Name: name,
- ConfDir: confDir(opts),
- ModulesConfDir: modulesConfDir(opts),
- ModulesSDConfPath: watchPaths(opts),
- VnodesConfDir: confDir(opts),
- StateFile: stateFile(),
- LockDir: lockDir,
- RunModule: opts.Module,
- MinUpdateEvery: opts.UpdateEvery,
+ Name: name,
+ ConfDir: confDir(opts),
+ ModulesConfDir: dir,
+ ModulesConfSDDir: modulesConfSDDir(dir),
+ ModulesConfWatchPath: watchPaths(opts),
+ VnodesConfDir: confDir(opts),
+ StateFile: stateFile(),
+ LockDir: lockDir,
+ RunModule: opts.Module,
+ MinUpdateEvery: opts.UpdateEvery,
})
a.Debugf("plugin: name=%s, version=%s", a.Name, version)
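
Each modules conf dir gains a parallel `sd` subdirectory; a sketch of the derivation (inside cmd/godplugin; paths illustrative):

// sketch: derive the service-discovery conf dirs from the modules conf dirs
dir := modulesConfDir(opts) // e.g. ["/etc/netdata/go.d", "/usr/lib/netdata/conf.d/go.d"]
fmt.Println(modulesConfSDDir(dir))
// [/etc/netdata/go.d/sd /usr/lib/netdata/conf.d/go.d/sd]
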
diff --git a/config/go.d/activemq.conf b/config/go.d/activemq.conf
index 0f5b157cc..d69a2d23d 100644
--- a/config/go.d/activemq.conf
+++ b/config/go.d/activemq.conf
@@ -1,11 +1,10 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/activemq
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:8161
- webadmin: admin
+#jobs:
+# - name: local
+# url: http://localhost:8161
+# webadmin: admin
+# - name: remote
+# url: http://203.0.113.1:8161
+# webadmin: admin
diff --git a/config/go.d/apache.conf b/config/go.d/apache.conf
index a57d4f4e1..a6f1089a2 100644
--- a/config/go.d/apache.conf
+++ b/config/go.d/apache.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/apache
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost/server-status?auto
-
- - name: local
- url: http://127.0.0.1/server-status?auto
+#jobs:
+# - name: local
+# url: http://localhost/server-status?auto
diff --git a/config/go.d/bind.conf b/config/go.d/bind.conf
index 8dadc8efa..83a8b6de6 100644
--- a/config/go.d/bind.conf
+++ b/config/go.d/bind.conf
@@ -1,13 +1,9 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/bind
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8653/json/v1
-
- - name: local
- url: http://127.0.0.1:8653/xml/v3
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8653/json/v1
+#
+# - name: local
+# url: http://127.0.0.1:8653/xml/v3
diff --git a/config/go.d/cassandra.conf b/config/go.d/cassandra.conf
index 8a6f5f0b7..641791af0 100644
--- a/config/go.d/cassandra.conf
+++ b/config/go.d/cassandra.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/cassandra
-#update_every: 5
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:7072/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:7072/metrics
diff --git a/config/go.d/chrony.conf b/config/go.d/chrony.conf
index 2cf16620b..1098abddc 100644
--- a/config/go.d/chrony.conf
+++ b/config/go.d/chrony.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/chrony
-jobs:
- - name: local
- address: '127.0.0.1:323'
- timeout: 1
-
-# - name: remote
-# address: '203.0.113.0:323'
+#jobs:
+# - name: local
+# address: '127.0.0.1:323'
diff --git a/config/go.d/cockroachdb.conf b/config/go.d/cockroachdb.conf
index 36a8eed1f..6124a44fd 100644
--- a/config/go.d/cockroachdb.conf
+++ b/config/go.d/cockroachdb.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/cockroachdb
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:8080/_status/vars
-
- - name: local
- url: http://127.0.0.1:8080/_status/vars
+#jobs:
+# - name: local
+# url: http://localhost:8080/_status/vars
diff --git a/config/go.d/consul.conf b/config/go.d/consul.conf
index cafea474b..a8f7738b4 100644
--- a/config/go.d/consul.conf
+++ b/config/go.d/consul.conf
@@ -1,15 +1,7 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/consul
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:8500
- acl_token: ""
-
- - name: local
- url: http://127.0.0.1:8500
- acl_token: ""
+#jobs:
+# - name: local
+# url: http://localhost:8500
+# acl_token: ""
diff --git a/config/go.d/coredns.conf b/config/go.d/coredns.conf
index 78b10f7bc..d706340e3 100644
--- a/config/go.d/coredns.conf
+++ b/config/go.d/coredns.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/coredns
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - url: http://127.0.0.1:9153/metrics
- - url: http://kube-dns.kube-system.svc.cluster.local:9153/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9153/metrics
diff --git a/config/go.d/couchbase.conf b/config/go.d/couchbase.conf
index 8e3ecba64..cc3c1344a 100644
--- a/config/go.d/couchbase.conf
+++ b/config/go.d/couchbase.conf
@@ -1,12 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/couchbase
-#update_every: 10
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8091
- username: admin
- password: password
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8091
+# username: admin
+# password: password
diff --git a/config/go.d/couchdb.conf b/config/go.d/couchdb.conf
index 6fc9c47e4..18d56548e 100644
--- a/config/go.d/couchdb.conf
+++ b/config/go.d/couchdb.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/couchdb
-#update_every: 10
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: local
# url: http://127.0.0.1:5984
diff --git a/config/go.d/dns_query.conf b/config/go.d/dns_query.conf
index 94df30344..3f1b17a85 100644
--- a/config/go.d/dns_query.conf
+++ b/config/go.d/dns_query.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsquery
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: example
# record_types:
diff --git a/config/go.d/dnsdist.conf b/config/go.d/dnsdist.conf
index f11fd6440..d0324d705 100644
--- a/config/go.d/dnsdist.conf
+++ b/config/go.d/dnsdist.conf
@@ -1,17 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsdist
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8083
- headers:
- X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key).
-#
-# - name: remote
-# url: http://203.0.113.0:8083
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8083
# headers:
# X-API-Key: 'dnsdist-api-key' # static pre-shared authentication key for access to the REST API (api-key).
diff --git a/config/go.d/dnsmasq.conf b/config/go.d/dnsmasq.conf
index 02c9764a3..9289c47af 100644
--- a/config/go.d/dnsmasq.conf
+++ b/config/go.d/dnsmasq.conf
@@ -1,15 +1,7 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- protocol: udp
- address: '127.0.0.1:53'
-
-# - name: remote
+#jobs:
+# - name: local
# protocol: udp
-# address: '203.0.113.0:53'
+# address: '127.0.0.1:53'
diff --git a/config/go.d/dnsmasq_dhcp.conf b/config/go.d/dnsmasq_dhcp.conf
index 23b9d21e1..ed1d5f939 100644
--- a/config/go.d/dnsmasq_dhcp.conf
+++ b/config/go.d/dnsmasq_dhcp.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/dnsmasq_dhcp
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: dnsmasq_dhcp
leases_path: /var/lib/misc/dnsmasq.leases
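Not every stock config goes dormant: collectors that only read local files or system state, such as dnsmasq_dhcp here (and k8s_state, logind, mysql, and openvpn_status_log later in the patch), keep an active jobs section because probing their defaults is harmless, so only the option boilerplate is removed. Pointing the job at a non-default leases file is a one-line override; the path below is hypothetical:

jobs:
  - name: dnsmasq_dhcp
    leases_path: /var/lib/dnsmasq/dnsmasq.leases  # hypothetical non-default location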
diff --git a/config/go.d/docker.conf b/config/go.d/docker.conf
index 72e30c75c..22dd07c7b 100644
--- a/config/go.d/docker.conf
+++ b/config/go.d/docker.conf
@@ -1,12 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/docker
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: 'unix:///var/run/docker.sock'
- timeout: 2
- collect_container_size: no
+#jobs:
+# - name: local
+# address: 'unix:///var/run/docker.sock'
+# timeout: 2
+# collect_container_size: no
diff --git a/config/go.d/docker_engine.conf b/config/go.d/docker_engine.conf
index 184cac84e..abb775113 100644
--- a/config/go.d/docker_engine.conf
+++ b/config/go.d/docker_engine.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:9323/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9323/metrics
diff --git a/config/go.d/dockerhub.conf b/config/go.d/dockerhub.conf
index b9606a24b..4be2eb1be 100644
--- a/config/go.d/dockerhub.conf
+++ b/config/go.d/dockerhub.conf
@@ -1,10 +1,9 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
-# - name: local
-# repositories: ['user1/name1', 'user2/name2', 'user3/name3']
+# - name: dockerhub
+# repositories:
+# - user1/name1
+# - user2/name2
+# - user3/name3
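Besides being commented out, the dockerhub example also switches the repositories list from YAML flow style to block style; the two notations are interchangeable:

# flow style
repositories: ['user1/name1', 'user2/name2']

# block style
repositories:
  - user1/name1
  - user2/name2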
diff --git a/config/go.d/elasticsearch.conf b/config/go.d/elasticsearch.conf
index 16c19bb7f..95b9ba0bc 100644
--- a/config/go.d/elasticsearch.conf
+++ b/config/go.d/elasticsearch.conf
@@ -1,19 +1,7 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch
-#update_every: 5
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:9200
- cluster_mode: no
-
- # opensearch
- - name: local
- url: https://127.0.0.1:9200
- cluster_mode: no
- tls_skip_verify: yes
- username: admin
- password: admin
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9200
+# cluster_mode: no
diff --git a/config/go.d/energid.conf b/config/go.d/energid.conf
deleted file mode 100644
index e6495062e..000000000
--- a/config/go.d/energid.conf
+++ /dev/null
@@ -1,17 +0,0 @@
-## All available configuration options, their descriptions and default values:
-## https://github.com/netdata/go.d.plugin/tree/master/modules/energid
-
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-#jobs:
-# - name: energi
-# url: http://127.0.0.1:9796
-# username: energy
-# password: energy
-#
-# - name: bitcoin
-# url: http://203.0.113.0:8332
-# username: bitcoin
-# password: bitcoin
diff --git a/config/go.d/envoy.conf b/config/go.d/envoy.conf
index 02e7c9a23..79bdb3cd8 100644
--- a/config/go.d/envoy.conf
+++ b/config/go.d/envoy.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/envoy
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:9901/stats/prometheus
+#jobs:
+# - name: local
+# url: http://127.0.0.1:9901/stats/prometheus
diff --git a/config/go.d/filecheck.conf b/config/go.d/filecheck.conf
index ae1ce303a..d47bf3cce 100644
--- a/config/go.d/filecheck.conf
+++ b/config/go.d/filecheck.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck
-#update_every: 10
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: files_example
# files:
diff --git a/config/go.d/fluentd.conf b/config/go.d/fluentd.conf
index 654b4707d..eb47a1dcd 100644
--- a/config/go.d/fluentd.conf
+++ b/config/go.d/fluentd.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/fluentd
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:24220
-
- - name: local
- url: http://127.0.0.1:24220
+#jobs:
+# - name: local
+# url: http://localhost:24220
diff --git a/config/go.d/freeradius.conf b/config/go.d/freeradius.conf
index 5b3df0a83..80ed65e67 100644
--- a/config/go.d/freeradius.conf
+++ b/config/go.d/freeradius.conf
@@ -1,17 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/freeradius
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: localhost
- port: 18121
- secret: adminsecret
-
- - name: local
- address: 127.0.0.1
- port: 18121
- secret: adminsecret
+#jobs:
+# - name: local
+# address: localhost
+# port: 18121
+# secret: adminsecret
diff --git a/config/go.d/geth.conf b/config/go.d/geth.conf
index c94083e1c..bbd53a20f 100644
--- a/config/go.d/geth.conf
+++ b/config/go.d/geth.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/geth
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: "local"
- url: http://localhost:6060/debug/metrics/prometheus
+#jobs:
+# - name: local
+# url: http://localhost:6060/debug/metrics/prometheus
diff --git a/config/go.d/haproxy.conf b/config/go.d/haproxy.conf
index e589ac2c6..ab4897d2e 100644
--- a/config/go.d/haproxy.conf
+++ b/config/go.d/haproxy.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/haproxy
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8404/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8404/metrics
diff --git a/config/go.d/hdfs.conf b/config/go.d/hdfs.conf
index 44c052711..531b094b8 100644
--- a/config/go.d/hdfs.conf
+++ b/config/go.d/hdfs.conf
@@ -1,11 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/hdfs
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-#
-
#jobs:
# - name: namenode
# url: http://127.0.0.1:9870/jmx
diff --git a/config/go.d/httpcheck.conf b/config/go.d/httpcheck.conf
index b29ead296..b552012dc 100644
--- a/config/go.d/httpcheck.conf
+++ b/config/go.d/httpcheck.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/httpcheck
-#update_every : 1
-#autodetection_retry : 0
-#priority : 70000
-
#jobs:
# - name: jira
# url: https://jira.localdomain/
diff --git a/config/go.d/isc_dhcpd.conf b/config/go.d/isc_dhcpd.conf
index 03b195b80..79d906802 100644
--- a/config/go.d/isc_dhcpd.conf
+++ b/config/go.d/isc_dhcpd.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/isc_dhcpd
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: ipv4_example
# leases_path: '/path/to/dhcpd.leases_ipv4'
diff --git a/config/go.d/k8s_kubelet.conf b/config/go.d/k8s_kubelet.conf
index 64f895d84..37aaba03f 100644
--- a/config/go.d/k8s_kubelet.conf
+++ b/config/go.d/k8s_kubelet.conf
@@ -1,11 +1,5 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubelet
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - url: http://127.0.0.1:10255/metrics
- - url: https://localhost:10250/metrics
- tls_skip_verify: yes
+#jobs:
+# - url: http://127.0.0.1:10255/metrics
diff --git a/config/go.d/k8s_kubeproxy.conf b/config/go.d/k8s_kubeproxy.conf
index dabb7fe1b..4cf00bdd8 100644
--- a/config/go.d/k8s_kubeproxy.conf
+++ b/config/go.d/k8s_kubeproxy.conf
@@ -1,8 +1,5 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_kubeproxy
-update_every: 1
-autodetection_retry: 0
-
-jobs:
- - url: http://127.0.0.1:10249/metrics
+#jobs:
+# - url: http://127.0.0.1:10249/metrics
diff --git a/config/go.d/k8s_state.conf b/config/go.d/k8s_state.conf
index ba386e0d2..98e9a2e20 100644
--- a/config/go.d/k8s_state.conf
+++ b/config/go.d/k8s_state.conf
@@ -1,9 +1,5 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/k8s_state
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: k8s_state
diff --git a/config/go.d/lighttpd.conf b/config/go.d/lighttpd.conf
index df403375e..005ab1244 100644
--- a/config/go.d/lighttpd.conf
+++ b/config/go.d/lighttpd.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/lighttpd
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost/server-status?auto
-
- - name: local
- url: http://127.0.0.1/server-status?auto
+#jobs:
+# - name: local
+# url: http://localhost/server-status?auto
diff --git a/config/go.d/logind.conf b/config/go.d/logind.conf
index 5ff90345a..764abc921 100644
--- a/config/go.d/logind.conf
+++ b/config/go.d/logind.conf
@@ -1,9 +1,5 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/logind
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: logind
diff --git a/config/go.d/logstash.conf b/config/go.d/logstash.conf
index 4afa1a298..f715c5d78 100644
--- a/config/go.d/logstash.conf
+++ b/config/go.d/logstash.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/logstash
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:9600
-
- - name: local
- url: http://127.0.0.1:9600
+#jobs:
+# - name: local
+# url: http://localhost:9600
diff --git a/config/go.d/mongodb.conf b/config/go.d/mongodb.conf
index 5236df659..c9ef76b6a 100644
--- a/config/go.d/mongodb.conf
+++ b/config/go.d/mongodb.conf
@@ -1,14 +1,10 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/mongodb
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- uri: 'mongodb://localhost:27017'
- timeout: 2
+#jobs:
+# - name: local
+# uri: 'mongodb://localhost:27017'
+# timeout: 2
# databases:
# include:
# - "* *"
diff --git a/config/go.d/mysql.conf b/config/go.d/mysql.conf
index 15ce2abc9..9a639ddf0 100644
--- a/config/go.d/mysql.conf
+++ b/config/go.d/mysql.conf
@@ -1,11 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/mysql
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-# timeout: 1
-
jobs:
# my.cnf
- name: local
diff --git a/config/go.d/nginxplus.conf b/config/go.d/nginxplus.conf
index d66318a76..b4f023f1a 100644
--- a/config/go.d/nginxplus.conf
+++ b/config/go.d/nginxplus.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/nginxplus
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1
+#jobs:
+# - name: local
+# url: http://127.0.0.1
diff --git a/config/go.d/nginxvts.conf b/config/go.d/nginxvts.conf
index 39fb477ea..775e64c19 100644
--- a/config/go.d/nginxvts.conf
+++ b/config/go.d/nginxvts.conf
@@ -1,12 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/nginxvts
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1/status/format/json
-# - name: remote
-# url: http://203.0.113.0/status/format/json
+#jobs:
+# - name: local
+# url: http://127.0.0.1/status/format/json
diff --git a/config/go.d/ntpd.conf b/config/go.d/ntpd.conf
index e36b317bf..2e66efbfa 100644
--- a/config/go.d/ntpd.conf
+++ b/config/go.d/ntpd.conf
@@ -5,10 +5,7 @@
#autodetection_retry: 0
#priority: 70000
-jobs:
- - name: local
- address: '127.0.0.1:123'
- collect_peers: no
-
-# - name: remote
-# address: '203.0.113.0:123'
+#jobs:
+# - name: local
+# address: '127.0.0.1:123'
+# collect_peers: no
diff --git a/config/go.d/openvpn.conf b/config/go.d/openvpn.conf
index aaf297c5c..d7c94d8b9 100644
--- a/config/go.d/openvpn.conf
+++ b/config/go.d/openvpn.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: 127.0.0.1:7505
+#jobs:
+# - name: local
+# address: 127.0.0.1:7505
diff --git a/config/go.d/openvpn_status_log.conf b/config/go.d/openvpn_status_log.conf
index 4959f1c8c..bc9fe391d 100644
--- a/config/go.d/openvpn_status_log.conf
+++ b/config/go.d/openvpn_status_log.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/openvpn_status_log
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: local
log_path: '/var/log/openvpn/status.log'
diff --git a/config/go.d/pgbouncer.conf b/config/go.d/pgbouncer.conf
index a6eb76d32..85c020fe1 100644
--- a/config/go.d/pgbouncer.conf
+++ b/config/go.d/pgbouncer.conf
@@ -1,12 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/pgbouncer
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
- - name: local
- dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'
+#jobs:
+# - name: local
+# dsn: 'postgres://postgres:postgres@127.0.0.1:6432/pgbouncer'
+# - name: local
+# dsn: 'host=/tmp dbname=pgbouncer user=postgres port=6432'
diff --git a/config/go.d/phpdaemon.conf b/config/go.d/phpdaemon.conf
index 75ddda0db..b7ae1ae0d 100644
--- a/config/go.d/phpdaemon.conf
+++ b/config/go.d/phpdaemon.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/phpdaemon
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8509/FullStatus
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8509/FullStatus
diff --git a/config/go.d/phpfpm.conf b/config/go.d/phpfpm.conf
index 1ae811c6f..55f8d6e75 100644
--- a/config/go.d/phpfpm.conf
+++ b/config/go.d/phpfpm.conf
@@ -1,17 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/phpfpm
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost/status?full&json
-
- - name: local
- url: http://127.0.0.1/status?full&json
-
- - name: local
- url: http://[::1]/status?full&json
-
+#jobs:
+# - name: local
+# url: http://localhost/status?full&json
diff --git a/config/go.d/pihole.conf b/config/go.d/pihole.conf
index 856d42635..60d223537 100644
--- a/config/go.d/pihole.conf
+++ b/config/go.d/pihole.conf
@@ -1,14 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/pihole
-#update_every : 5
-#timeout : 5
-#autodetection_retry : 0
-#priority : 70000
-
-jobs:
- - name: pihole
- url: http://127.0.0.1
-
+#jobs:
+# - name: pihole
+# url: http://127.0.0.1
# - name: pihole
# url: http://pi.hole
diff --git a/config/go.d/pika.conf b/config/go.d/pika.conf
index 96a7766b7..697ddbeea 100644
--- a/config/go.d/pika.conf
+++ b/config/go.d/pika.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/pika
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: 'redis://@127.0.0.1:9221'
+#jobs:
+# - name: local
+# address: 'redis://@127.0.0.1:9221'
diff --git a/config/go.d/ping.conf b/config/go.d/ping.conf
index 7fa4b004a..f3034b9cd 100644
--- a/config/go.d/ping.conf
+++ b/config/go.d/ping.conf
@@ -1,12 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/ping
-#update_every: 5
-#autodetection_retry: 0
-#priority: 70000
-
-## Uncomment the following lines to create a data collection config:
-
#jobs:
# - name: example
# hosts:
diff --git a/config/go.d/portcheck.conf b/config/go.d/portcheck.conf
index 237b68a12..710c04997 100644
--- a/config/go.d/portcheck.conf
+++ b/config/go.d/portcheck.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/portcheck
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: job1
# host: 10.0.0.1
diff --git a/config/go.d/powerdns.conf b/config/go.d/powerdns.conf
index 7873d54f5..d31336538 100644
--- a/config/go.d/powerdns.conf
+++ b/config/go.d/powerdns.conf
@@ -1,17 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8081
-# headers:
-# X-API-KEY: secret # static pre-shared authentication key for access to the REST API (api-key).
-
-# - name: remote
-# url: http://203.0.113.0:8081
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8081
# headers:
# X-API-KEY: secret # static pre-shared authentication key for access to the REST API (api-key).
diff --git a/config/go.d/powerdns_recursor.conf b/config/go.d/powerdns_recursor.conf
index 31873f2a8..30e6202a5 100644
--- a/config/go.d/powerdns_recursor.conf
+++ b/config/go.d/powerdns_recursor.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/powerdns_recursor
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8081
-
-# - name: remote
-# url: http://203.0.113.0:8081
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8081
diff --git a/config/go.d/prometheus.conf b/config/go.d/prometheus.conf
index 43fa0af29..837f6f6c7 100644
--- a/config/go.d/prometheus.conf
+++ b/config/go.d/prometheus.conf
@@ -1,1361 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/prometheus
-#update_every: 10
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- # https://github.com/prometheus/prometheus/wiki/Default-port-allocations
- # - name: node_exporter_local
- # url: 'http://127.0.0.1:9100/metrics'
- - name: loki_local
- url: 'http://127.0.0.1:3100/metrics'
- - name: wireguard_local
- url: 'http://127.0.0.1:9586/metrics'
- expected_prefix: 'wireguard_'
- - name: netbox_local
- url: 'http://127.0.0.1:8001/metrics'
- expected_prefix: 'django_'
- - name: haproxy_exporter_local
- url: 'http://127.0.0.1:9101/metrics'
- - name: statsd_exporter_local
- url: 'http://127.0.0.1:9102/metrics'
- - name: collectd_exporter_local
- url: 'http://127.0.0.1:9103/metrics'
- - name: mysqld_exporter_local
- url: 'http://127.0.0.1:9104/metrics'
- - name: mesos_exporter_local
- url: 'http://127.0.0.1:9105/metrics'
- - name: cloudwatch_exporter_local
- url: 'http://127.0.0.1:9106/metrics'
- - name: consul_exporter_local
- url: 'http://127.0.0.1:9107/metrics'
- - name: graphite_exporter_local
- url: 'http://127.0.0.1:9108/metrics'
- - name: graphite_exporter_local
- url: 'http://127.0.0.1:9109/metrics'
- - name: blackbox_exporter_local
- url: 'http://127.0.0.1:9110/metrics'
- - name: expvar_exporter_local
- url: 'http://127.0.0.1:9111/metrics'
- - name: promacct_pcap-based_network_traffic_accounting_local
- url: 'http://127.0.0.1:9112/metrics'
- - name: nginx_exporter_local
- url: 'http://127.0.0.1:9113/metrics'
- - name: elasticsearch_exporter_local
- url: 'http://127.0.0.1:9114/metrics'
- - name: blackbox_exporter_local
- url: 'http://127.0.0.1:9115/metrics'
- - name: snmp_exporter_local
- url: 'http://127.0.0.1:9116/metrics'
- - name: apache_exporter_local
- url: 'http://127.0.0.1:9117/metrics'
- - name: jenkins_exporter_local
- url: 'http://127.0.0.1:9118/metrics'
- - name: bind_exporter_local
- url: 'http://127.0.0.1:9119/metrics'
- - name: powerdns_exporter_local
- url: 'http://127.0.0.1:9120/metrics'
- - name: redis_exporter_local
- url: 'http://127.0.0.1:9121/metrics'
- - name: influxdb_exporter_local
- url: 'http://127.0.0.1:9122/metrics'
- - name: rethinkdb_exporter_local
- url: 'http://127.0.0.1:9123/metrics'
- - name: freebsd_sysctl_exporter_local
- url: 'http://127.0.0.1:9124/metrics'
- - name: statsd_exporter_local
- url: 'http://127.0.0.1:9125/metrics'
- - name: new_relic_exporter_local
- url: 'http://127.0.0.1:9126/metrics'
- - name: pgbouncer_exporter_local
- url: 'http://127.0.0.1:9127/metrics'
- - name: ceph_exporter_local
- url: 'http://127.0.0.1:9128/metrics'
- - name: haproxy_log_exporter_local
- url: 'http://127.0.0.1:9129/metrics'
- - name: unifi_poller_local
- url: 'http://127.0.0.1:9130/metrics'
- - name: varnish_exporter_local
- url: 'http://127.0.0.1:9131/metrics'
- - name: airflow_exporter_local
- url: 'http://127.0.0.1:9132/metrics'
- - name: fritz_box_exporter_local
- url: 'http://127.0.0.1:9133/metrics'
- - name: zfs_exporter_local
- url: 'http://127.0.0.1:9134/metrics'
- - name: rtorrent_exporter_local
- url: 'http://127.0.0.1:9135/metrics'
- - name: collins_exporter_local
- url: 'http://127.0.0.1:9136/metrics'
- - name: silicondust_hdhomerun_exporter_local
- url: 'http://127.0.0.1:9137/metrics'
- - name: heka_exporter_local
- url: 'http://127.0.0.1:9138/metrics'
- - name: azure_sql_exporter_local
- url: 'http://127.0.0.1:9139/metrics'
- - name: mirth_exporter_local
- url: 'http://127.0.0.1:9140/metrics'
- - name: zookeeper_exporter_local
- url: 'http://127.0.0.1:9141/metrics'
- - name: big-ip_exporter_local
- url: 'http://127.0.0.1:9142/metrics'
- - name: cloudmonitor_exporter_local
- url: 'http://127.0.0.1:9143/metrics'
- - name: aerospike_exporter_local
- url: 'http://127.0.0.1:9145/metrics'
- - name: icecast_exporter_local
- url: 'http://127.0.0.1:9146/metrics'
- - name: nginx_request_exporter_local
- url: 'http://127.0.0.1:9147/metrics'
- - name: nats_exporter_local
- url: 'http://127.0.0.1:9148/metrics'
- - name: passenger_exporter_local
- url: 'http://127.0.0.1:9149/metrics'
- - name: memcached_exporter_local
- url: 'http://127.0.0.1:9150/metrics'
- - name: varnish_request_exporter_local
- url: 'http://127.0.0.1:9151/metrics'
- - name: command_runner_exporter_local
- url: 'http://127.0.0.1:9152/metrics'
- - name: coredns_local
- url: 'http://127.0.0.1:9153/metrics'
- - name: postfix_exporter_local
- url: 'http://127.0.0.1:9154/metrics'
- - name: vsphere_graphite_local
- url: 'http://127.0.0.1:9155/metrics'
- - name: webdriver_exporter_local
- url: 'http://127.0.0.1:9156/metrics'
- - name: ibm_mq_exporter_local
- url: 'http://127.0.0.1:9157/metrics'
- - name: pingdom_exporter_local
- url: 'http://127.0.0.1:9158/metrics'
- - name: apache_flink_exporter_local
- url: 'http://127.0.0.1:9160/metrics'
- - name: oracle_db_exporter_local
- url: 'http://127.0.0.1:9161/metrics'
- - name: apcupsd_exporter_local
- url: 'http://127.0.0.1:9162/metrics'
- - name: zgres_exporter_local
- url: 'http://127.0.0.1:9163/metrics'
- - name: s6_exporter_local
- url: 'http://127.0.0.1:9164/metrics'
- - name: keepalived_exporter_local
- url: 'http://127.0.0.1:9165/metrics'
- - name: dovecot_exporter_local
- url: 'http://127.0.0.1:9166/metrics'
- - name: unbound_exporter_local
- url: 'http://127.0.0.1:9167/metrics'
- - name: gitlab-monitor_local
- url: 'http://127.0.0.1:9168/metrics'
- - name: lustre_exporter_local
- url: 'http://127.0.0.1:9169/metrics'
- - name: docker_hub_exporter_local
- url: 'http://127.0.0.1:9170/metrics'
- - name: github_exporter_local
- url: 'http://127.0.0.1:9171/metrics'
- - name: script_exporter_local
- url: 'http://127.0.0.1:9172/metrics'
- - name: rancher_exporter_local
- url: 'http://127.0.0.1:9173/metrics'
- - name: docker-cloud_exporter_local
- url: 'http://127.0.0.1:9174/metrics'
- - name: saltstack_exporter_local
- url: 'http://127.0.0.1:9175/metrics'
- - name: openvpn_exporter_local
- url: 'http://127.0.0.1:9176/metrics'
- - name: libvirt_exporter_local
- url: 'http://127.0.0.1:9177/metrics'
- - name: stream_exporter_local
- url: 'http://127.0.0.1:9178/metrics'
- - name: shield_exporter_local
- url: 'http://127.0.0.1:9179/metrics'
- - name: scylladb_exporter_local
- url: 'http://127.0.0.1:9180/metrics'
- - name: openstack_ceilometer_exporter_local
- url: 'http://127.0.0.1:9181/metrics'
- - name: openstack_exporter_local
- url: 'http://127.0.0.1:9183/metrics'
- - name: twitch_exporter_local
- url: 'http://127.0.0.1:9184/metrics'
- - name: kafka_topic_exporter_local
- url: 'http://127.0.0.1:9185/metrics'
- - name: cloud_foundry_firehose_exporter_local
- url: 'http://127.0.0.1:9186/metrics'
- - name: postgresql_exporter_local
- url: 'http://127.0.0.1:9187/metrics'
- - name: crypto_exporter_local
- url: 'http://127.0.0.1:9188/metrics'
- - name: hetzner_cloud_csi_driver_nodes_local
- url: 'http://127.0.0.1:9189/metrics'
- - name: bosh_exporter_local
- url: 'http://127.0.0.1:9190/metrics'
- - name: netflow_exporter_local
- url: 'http://127.0.0.1:9191/metrics'
- - name: ceph_exporter_local
- url: 'http://127.0.0.1:9192/metrics'
- - name: cloud_foundry_exporter_local
- url: 'http://127.0.0.1:9193/metrics'
- - name: bosh_tsdb_exporter_local
- url: 'http://127.0.0.1:9194/metrics'
- - name: maxscale_exporter_local
- url: 'http://127.0.0.1:9195/metrics'
- - name: upnp_internet_gateway_device_exporter_local
- url: 'http://127.0.0.1:9196/metrics'
- - name: logstash_exporter_local
- url: 'http://127.0.0.1:9198/metrics'
- - name: cloudflare_exporter_local
- url: 'http://127.0.0.1:9199/metrics'
- - name: pacemaker_exporter_local
- url: 'http://127.0.0.1:9202/metrics'
- - name: domain_exporter_local
- url: 'http://127.0.0.1:9203/metrics'
- - name: pcsensor_temper_exporter_local
- url: 'http://127.0.0.1:9204/metrics'
- - name: nextcloud_exporter_local
- url: 'http://127.0.0.1:9205/metrics'
- - name: elasticsearch_exporter_local
- url: 'http://127.0.0.1:9206/metrics'
- - name: mysql_exporter_local
- url: 'http://127.0.0.1:9207/metrics'
- - name: kafka_consumer_group_exporter_local
- url: 'http://127.0.0.1:9208/metrics'
- - name: fastnetmon_advanced_exporter_local
- url: 'http://127.0.0.1:9209/metrics'
- - name: netatmo_exporter_local
- url: 'http://127.0.0.1:9210/metrics'
- - name: dnsbl-exporter_local
- url: 'http://127.0.0.1:9211/metrics'
- - name: digitalocean_exporter_local
- url: 'http://127.0.0.1:9212/metrics'
- - name: custom_exporter_local
- url: 'http://127.0.0.1:9213/metrics'
- - name: mqtt_blackbox_exporter_local
- url: 'http://127.0.0.1:9214/metrics'
- - name: prometheus_graphite_bridge_local
- url: 'http://127.0.0.1:9215/metrics'
- - name: mongodb_exporter_local
- url: 'http://127.0.0.1:9216/metrics'
- - name: consul_agent_exporter_local
- url: 'http://127.0.0.1:9217/metrics'
- - name: promql-guard_local
- url: 'http://127.0.0.1:9218/metrics'
- - name: ssl_certificate_exporter_local
- url: 'http://127.0.0.1:9219/metrics'
- - name: netapp_trident_exporter_local
- url: 'http://127.0.0.1:9220/metrics'
- - name: proxmox_ve_exporter_local
- url: 'http://127.0.0.1:9221/metrics'
- - name: aws_ecs_exporter_local
- url: 'http://127.0.0.1:9222/metrics'
- - name: bladepsgi_exporter_local
- url: 'http://127.0.0.1:9223/metrics'
- - name: fluentd_exporter_local
- url: 'http://127.0.0.1:9224/metrics'
- - name: mailexporter_local
- url: 'http://127.0.0.1:9225/metrics'
- - name: allas_local
- url: 'http://127.0.0.1:9226/metrics'
- - name: proc_exporter_local
- url: 'http://127.0.0.1:9227/metrics'
- - name: flussonic_exporter_local
- url: 'http://127.0.0.1:9228/metrics'
- - name: gitlab-workhorse_local
- url: 'http://127.0.0.1:9229/metrics'
- - name: network_ups_tools_exporter_local
- url: 'http://127.0.0.1:9230/metrics'
- - name: solr_exporter_local
- url: 'http://127.0.0.1:9231/metrics'
- - name: osquery_exporter_local
- url: 'http://127.0.0.1:9232/metrics'
- - name: mgmt_exporter_local
- url: 'http://127.0.0.1:9233/metrics'
- - name: mosquitto_exporter_local
- url: 'http://127.0.0.1:9234/metrics'
- - name: gitlab-pages_exporter_local
- url: 'http://127.0.0.1:9235/metrics'
- - name: gitlab_gitaly_exporter_local
- url: 'http://127.0.0.1:9236/metrics'
- - name: sql_exporter_local
- url: 'http://127.0.0.1:9237/metrics'
- - name: uwsgi_expoter_local
- url: 'http://127.0.0.1:9238/metrics'
- - name: surfboard_exporter_local
- url: 'http://127.0.0.1:9239/metrics'
- - name: tinyproxy_exporter_local
- url: 'http://127.0.0.1:9240/metrics'
- - name: arangodb_exporter_local
- url: 'http://127.0.0.1:9241/metrics'
- - name: ceph_radosgw_usage_exporter_local
- url: 'http://127.0.0.1:9242/metrics'
- - name: chef_compliance_exporter_local
- url: 'http://127.0.0.1:9243/metrics'
- - name: moby_container_exporter_local
- url: 'http://127.0.0.1:9244/metrics'
- - name: naemon_nagios_exporter_local
- url: 'http://127.0.0.1:9245/metrics'
- - name: smartpi_local
- url: 'http://127.0.0.1:9246/metrics'
- - name: sphinx_exporter_local
- url: 'http://127.0.0.1:9247/metrics'
- - name: freebsd_gstat_exporter_local
- url: 'http://127.0.0.1:9248/metrics'
- - name: apache_flink_metrics_reporter_local
- url: 'http://127.0.0.1:9249/metrics'
- - name: opentsdb_exporter_local
- url: 'http://127.0.0.1:9250/metrics'
- - name: sensu_exporter_local
- url: 'http://127.0.0.1:9251/metrics'
- - name: gitlab_runner_exporter_local
- url: 'http://127.0.0.1:9252/metrics'
- - name: php-fpm_exporter_local
- url: 'http://127.0.0.1:9253/metrics'
- - name: kafka_burrow_exporter_local
- url: 'http://127.0.0.1:9254/metrics'
- - name: google_stackdriver_exporter_local
- url: 'http://127.0.0.1:9255/metrics'
- - name: td-agent_exporter_local
- url: 'http://127.0.0.1:9256/metrics'
- - name: smart_exporter_local
- url: 'http://127.0.0.1:9257/metrics'
- - name: hello_sense_exporter_local
- url: 'http://127.0.0.1:9258/metrics'
- - name: azure_resources_exporter_local
- url: 'http://127.0.0.1:9259/metrics'
- - name: buildkite_exporter_local
- url: 'http://127.0.0.1:9260/metrics'
- - name: grafana_exporter_local
- url: 'http://127.0.0.1:9261/metrics'
- - name: bloomsky_exporter_local
- url: 'http://127.0.0.1:9262/metrics'
- - name: vmware_guest_exporter_local
- url: 'http://127.0.0.1:9263/metrics'
- - name: nest_exporter_local
- url: 'http://127.0.0.1:9264/metrics'
- - name: weather_exporter_local
- url: 'http://127.0.0.1:9265/metrics'
- - name: openhab_exporter_local
- url: 'http://127.0.0.1:9266/metrics'
- - name: nagios_livestatus_exporter_local
- url: 'http://127.0.0.1:9267/metrics'
- - name: cratedb_remote_remote_read_write_adapter_local
- url: 'http://127.0.0.1:9268/metrics'
- - name: fluent-agent-lite_exporter_local
- url: 'http://127.0.0.1:9269/metrics'
- - name: jmeter_exporter_local
- url: 'http://127.0.0.1:9270/metrics'
- - name: pagespeed_exporter_local
- url: 'http://127.0.0.1:9271/metrics'
- - name: vmware_exporter_local
- url: 'http://127.0.0.1:9272/metrics'
- - name: kubernetes_persistentvolume_disk_usage_exporter_local
- url: 'http://127.0.0.1:9274/metrics'
- - name: nrpe_exporter_local
- url: 'http://127.0.0.1:9275/metrics'
- - name: githubql_exporter_local
- url: 'http://127.0.0.1:9276/metrics'
- - name: azure_monitor_exporter_local
- url: 'http://127.0.0.1:9276/metrics'
- - name: mongo_collection_exporter_local
- url: 'http://127.0.0.1:9277/metrics'
- - name: crypto_miner_exporter_local
- url: 'http://127.0.0.1:9278/metrics'
- - name: instaclustr_exporter_local
- url: 'http://127.0.0.1:9279/metrics'
- - name: citrix_netscaler_exporter_local
- url: 'http://127.0.0.1:9280/metrics'
- - name: fastd_exporter_local
- url: 'http://127.0.0.1:9281/metrics'
- - name: freeswitch_exporter_local
- url: 'http://127.0.0.1:9282/metrics'
- - name: ceph_ceph-mgr_prometheus_plugin_local
- url: 'http://127.0.0.1:9283/metrics'
- - name: gobetween_local
- url: 'http://127.0.0.1:9284/metrics'
- - name: database_exporter_local
- url: 'http://127.0.0.1:9285/metrics'
- - name: vdo_compression_and_deduplication_exporter_local
- url: 'http://127.0.0.1:9286/metrics'
- - name: ceph_iscsi_gateway_statistics_local
- url: 'http://127.0.0.1:9287/metrics'
- - name: consrv_local
- url: 'http://127.0.0.1:9288/metrics'
- - name: lovoos_ipmi_exporter_local
- url: 'http://127.0.0.1:9289/metrics'
- - name: soundclouds_ipmi_exporter_local
- url: 'http://127.0.0.1:9290/metrics'
- - name: ibm_z_hmc_exporter_local
- url: 'http://127.0.0.1:9291/metrics'
- - name: netapp_ontap_api_exporter_local
- url: 'http://127.0.0.1:9292/metrics'
- - name: connection_status_exporter_local
- url: 'http://127.0.0.1:9293/metrics'
- - name: miflora_flower_care_exporter_local
- url: 'http://127.0.0.1:9294/metrics'
- - name: freifunk_exporter_local
- url: 'http://127.0.0.1:9295/metrics'
- - name: odbc_exporter_local
- url: 'http://127.0.0.1:9296/metrics'
- - name: machbase_exporter_local
- url: 'http://127.0.0.1:9297/metrics'
- - name: generic_exporter_local
- url: 'http://127.0.0.1:9298/metrics'
- - name: exporter_aggregator_local
- url: 'http://127.0.0.1:9299/metrics'
- - name: squid_exporter_local
- url: 'http://127.0.0.1:9301/metrics'
- - name: faucet_sdn_faucet_exporter_local
- url: 'http://127.0.0.1:9302/metrics'
- - name: faucet_sdn_gauge_exporter_local
- url: 'http://127.0.0.1:9303/metrics'
- - name: logstash_exporter_local
- url: 'http://127.0.0.1:9304/metrics'
- - name: go-ethereum_exporter_local
- url: 'http://127.0.0.1:9305/metrics'
- - name: kyototycoon_exporter_local
- url: 'http://127.0.0.1:9306/metrics'
- - name: audisto_exporter_local
- url: 'http://127.0.0.1:9307/metrics'
- - name: kafka_exporter_local
- url: 'http://127.0.0.1:9308/metrics'
- - name: fluentd_exporter_local
- url: 'http://127.0.0.1:9309/metrics'
- - name: open_vswitch_exporter_local
- url: 'http://127.0.0.1:9310/metrics'
- - name: iota_exporter_local
- url: 'http://127.0.0.1:9311/metrics'
- - name: cloudprober_exporter_local
- url: 'http://127.0.0.1:9313/metrics'
- - name: eris_exporter_local
- url: 'http://127.0.0.1:9314/metrics'
- - name: centrifugo_exporter_local
- url: 'http://127.0.0.1:9315/metrics'
- - name: tado_exporter_local
- url: 'http://127.0.0.1:9316/metrics'
- - name: tellstick_local_exporter_local
- url: 'http://127.0.0.1:9317/metrics'
- - name: conntrack_exporter_local
- url: 'http://127.0.0.1:9318/metrics'
- - name: flexlm_exporter_local
- url: 'http://127.0.0.1:9319/metrics'
- - name: consul_telemetry_exporter_local
- url: 'http://127.0.0.1:9320/metrics'
- - name: spring_boot_actuator_exporter_local
- url: 'http://127.0.0.1:9321/metrics'
- - name: haproxy_abuser_exporter_local
- url: 'http://127.0.0.1:9322/metrics'
- - name: docker_prometheus_metrics_local
- url: 'http://127.0.0.1:9323/metrics'
- - name: bird_routing_daemon_exporter_local
- url: 'http://127.0.0.1:9324/metrics'
- - name: ovirt_exporter_local
- url: 'http://127.0.0.1:9325/metrics'
- - name: junos_exporter_local
- url: 'http://127.0.0.1:9326/metrics'
- - name: s3_exporter_local
- url: 'http://127.0.0.1:9327/metrics'
- - name: openldap_syncrepl_exporter_local
- url: 'http://127.0.0.1:9328/metrics'
- - name: cups_exporter_local
- url: 'http://127.0.0.1:9329/metrics'
- - name: openldap_metrics_exporter_local
- url: 'http://127.0.0.1:9330/metrics'
- - name: influx-spout_prometheus_metrics_local
- url: 'http://127.0.0.1:9331/metrics'
- - name: network_exporter_local
- url: 'http://127.0.0.1:9332/metrics'
- - name: vault_pki_exporter_local
- url: 'http://127.0.0.1:9333/metrics'
- - name: ejabberd_exporter_local
- url: 'http://127.0.0.1:9334/metrics'
- - name: nexsan_exporter_local
- url: 'http://127.0.0.1:9335/metrics'
- - name: mediacom_internet_usage_exporter_local
- url: 'http://127.0.0.1:9336/metrics'
- - name: mqttgateway_local
- url: 'http://127.0.0.1:9337/metrics'
- - name: aws_s3_exporter_local
- url: 'http://127.0.0.1:9339/metrics'
- - name: financial_quotes_exporter_local
- url: 'http://127.0.0.1:9340/metrics'
- - name: slurm_exporter_local
- url: 'http://127.0.0.1:9341/metrics'
- - name: frr_exporter_local
- url: 'http://127.0.0.1:9342/metrics'
- - name: gridserver_exporter_local
- url: 'http://127.0.0.1:9343/metrics'
- - name: mqtt_exporter_local
- url: 'http://127.0.0.1:9344/metrics'
- - name: ruckus_smartzone_exporter_local
- url: 'http://127.0.0.1:9345/metrics'
- - name: ping_exporter_local
- url: 'http://127.0.0.1:9346/metrics'
- - name: junos_exporter_local
- url: 'http://127.0.0.1:9347/metrics'
- - name: bigquery_exporter_local
- url: 'http://127.0.0.1:9348/metrics'
- - name: configurable_elasticsearch_query_exporter_local
- url: 'http://127.0.0.1:9349/metrics'
- - name: thousandeyes_exporter_local
- url: 'http://127.0.0.1:9350/metrics'
- - name: wal-e_wal-g_exporter_local
- url: 'http://127.0.0.1:9351/metrics'
- - name: nature_remo_exporter_local
- url: 'http://127.0.0.1:9352/metrics'
- - name: ceph_exporter_local
- url: 'http://127.0.0.1:9353/metrics'
- - name: deluge_exporter_local
- url: 'http://127.0.0.1:9354/metrics'
- - name: nightwatchjs_exporter_local
- url: 'http://127.0.0.1:9355/metrics'
- - name: pacemaker_exporter_local
- url: 'http://127.0.0.1:9356/metrics'
- - name: p1_exporter_local
- url: 'http://127.0.0.1:9357/metrics'
- - name: performance_counters_exporter_local
- url: 'http://127.0.0.1:9358/metrics'
- - name: sidekiq_prometheus_local
- url: 'http://127.0.0.1:9359/metrics'
- - name: powershell_exporter_local
- url: 'http://127.0.0.1:9360/metrics'
- - name: scaleway_sd_exporter_local
- url: 'http://127.0.0.1:9361/metrics'
- - name: cisco_exporter_local
- url: 'http://127.0.0.1:9362/metrics'
- - name: clickhouse_local
- url: 'http://127.0.0.1:9363/metrics'
- - name: continent8_exporter_local
- url: 'http://127.0.0.1:9364/metrics'
- - name: cumulus_linux_exporter_local
- url: 'http://127.0.0.1:9365/metrics'
- - name: haproxy_stick_table_exporter_local
- url: 'http://127.0.0.1:9366/metrics'
- - name: teamspeak3_exporter_local
- url: 'http://127.0.0.1:9367/metrics'
- - name: ethereum_client_exporter_local
- url: 'http://127.0.0.1:9368/metrics'
- - name: prometheus_pushprox_local
- url: 'http://127.0.0.1:9369/metrics'
- - name: u-bmc_local
- url: 'http://127.0.0.1:9370/metrics'
- - name: conntrack-stats-exporter_local
- url: 'http://127.0.0.1:9371/metrics'
- - name: appmetrics_prometheus_local
- url: 'http://127.0.0.1:9372/metrics'
- - name: gcp_service_discovery_local
- url: 'http://127.0.0.1:9373/metrics'
- - name: smokeping_prober_local
- url: 'http://127.0.0.1:9374/metrics'
- - name: particle_exporter_local
- url: 'http://127.0.0.1:9375/metrics'
- - name: falco_local
- url: 'http://127.0.0.1:9376/metrics'
- - name: cisco_aci_exporter_local
- url: 'http://127.0.0.1:9377/metrics'
- - name: etcd_grpc_proxy_exporter_local
- url: 'http://127.0.0.1:9378/metrics'
- - name: etcd_exporter_local
- url: 'http://127.0.0.1:9379/metrics'
- - name: mythtv_exporter_local
- url: 'http://127.0.0.1:9380/metrics'
- - name: kafka_zookeeper_exporter_local
- url: 'http://127.0.0.1:9381/metrics'
- - name: frrouting_exporter_local
- url: 'http://127.0.0.1:9382/metrics'
- - name: aws_health_exporter_local
- url: 'http://127.0.0.1:9383/metrics'
- - name: aws_sqs_exporter_local
- url: 'http://127.0.0.1:9384/metrics'
- - name: apcupsdexporter_local
- url: 'http://127.0.0.1:9385/metrics'
- - name: httpd-exporter_local
- url: 'http://127.0.0.1:9386/metrics'
- - name: tankerkönig_api_exporter_local
- url: 'http://127.0.0.1:9386/metrics'
- - name: sabnzbd_exporter_local
- url: 'http://127.0.0.1:9387/metrics'
- - name: linode_exporter_local
- url: 'http://127.0.0.1:9388/metrics'
- - name: scylla-cluster-tests_exporter_local
- url: 'http://127.0.0.1:9389/metrics'
- - name: kannel_exporter_local
- url: 'http://127.0.0.1:9390/metrics'
- - name: concourse_prometheus_metrics_local
- url: 'http://127.0.0.1:9391/metrics'
- - name: generic_command_line_output_exporter_local
- url: 'http://127.0.0.1:9392/metrics'
- - name: patroni_exporter_local
- url: 'http://127.0.0.1:9393/metrics'
- - name: alertmanager_github_webhook_receiver_local
- url: 'http://127.0.0.1:9393/metrics'
- - name: ruby_prometheus_exporter_local
- url: 'http://127.0.0.1:9394/metrics'
- - name: ldap_exporter_local
- url: 'http://127.0.0.1:9395/metrics'
- - name: monerod_exporter_local
- url: 'http://127.0.0.1:9396/metrics'
- - name: comap_local
- url: 'http://127.0.0.1:9397/metrics'
- - name: open_hardware_monitor_exporter_local
- url: 'http://127.0.0.1:9398/metrics'
- - name: prometheus_sql_exporter_local
- url: 'http://127.0.0.1:9399/metrics'
- - name: ripe_atlas_exporter_local
- url: 'http://127.0.0.1:9400/metrics'
- - name: 1-wire_exporter_local
- url: 'http://127.0.0.1:9401/metrics'
- - name: google_cloud_platform_exporter_local
- url: 'http://127.0.0.1:9402/metrics'
- - name: zerto_exporter_local
- url: 'http://127.0.0.1:9403/metrics'
- - name: jmx_exporter_local
- url: 'http://127.0.0.1:9404/metrics'
- - name: discourse_exporter_local
- url: 'http://127.0.0.1:9405/metrics'
- - name: hhvm_exporter_local
- url: 'http://127.0.0.1:9406/metrics'
- - name: obs_studio_exporter_local
- url: 'http://127.0.0.1:9407/metrics'
- - name: rds_enhanced_monitoring_exporter_local
- url: 'http://127.0.0.1:9408/metrics'
- - name: ovn-kubernetes_master_exporter_local
- url: 'http://127.0.0.1:9409/metrics'
- - name: ovn-kubernetes_node_exporter_local
- url: 'http://127.0.0.1:9410/metrics'
- - name: softether_exporter_local
- url: 'http://127.0.0.1:9411/metrics'
- - name: sentry_exporter_local
- url: 'http://127.0.0.1:9412/metrics'
- - name: mogilefs_exporter_local
- url: 'http://127.0.0.1:9413/metrics'
- - name: homey_exporter_local
- url: 'http://127.0.0.1:9414/metrics'
- - name: cloudwatch_read_adapter_local
- url: 'http://127.0.0.1:9415/metrics'
- - name: hp_ilo_metrics_exporter_local
- url: 'http://127.0.0.1:9416/metrics'
- - name: ethtool_exporter_local
- url: 'http://127.0.0.1:9417/metrics'
- - name: gearman_exporter_local
- url: 'http://127.0.0.1:9418/metrics'
- - name: rabbitmq_exporter_local
- url: 'http://127.0.0.1:9419/metrics'
- - name: couchbase_exporter_local
- url: 'http://127.0.0.1:9420/metrics'
- - name: apicast_local
- url: 'http://127.0.0.1:9421/metrics'
- - name: jolokia_exporter_local
- url: 'http://127.0.0.1:9422/metrics'
- - name: hp_raid_exporter_local
- url: 'http://127.0.0.1:9423/metrics'
- - name: influxdb_stats_exporter_local
- url: 'http://127.0.0.1:9424/metrics'
- - name: pachyderm_exporter_local
- url: 'http://127.0.0.1:9425/metrics'
- - name: vespa_engine_exporter_local
- url: 'http://127.0.0.1:9426/metrics'
- - name: ping_exporter_local
- url: 'http://127.0.0.1:9427/metrics'
- - name: ssh_exporter_local
- url: 'http://127.0.0.1:9428/metrics'
- - name: uptimerobot_exporter_local
- url: 'http://127.0.0.1:9429/metrics'
- - name: corerad_local
- url: 'http://127.0.0.1:9430/metrics'
- - name: hpfeeds_broker_exporter_local
- url: 'http://127.0.0.1:9431/metrics'
- - name: windows_perflib_exporter_local
- url: 'http://127.0.0.1:9432/metrics'
- - name: knot_exporter_local
- url: 'http://127.0.0.1:9433/metrics'
- - name: opensips_exporter_local
- url: 'http://127.0.0.1:9434/metrics'
- - name: ebpf_exporter_local
- url: 'http://127.0.0.1:9435/metrics'
- - name: mikrotik-exporter_local
- url: 'http://127.0.0.1:9436/metrics'
- - name: dell_emc_isilon_exporter_local
- url: 'http://127.0.0.1:9437/metrics'
- - name: dell_emc_ecs_exporter_local
- url: 'http://127.0.0.1:9438/metrics'
- - name: bitcoind_exporter_local
- url: 'http://127.0.0.1:9439/metrics'
- - name: ravendb_exporter_local
- url: 'http://127.0.0.1:9440/metrics'
- - name: nomad_exporter_local
- url: 'http://127.0.0.1:9441/metrics'
- - name: mcrouter_exporter_local
- url: 'http://127.0.0.1:9442/metrics'
- - name: foundationdb_exporter_local
- url: 'http://127.0.0.1:9444/metrics'
- - name: nvidia_gpu_exporter_local
- url: 'http://127.0.0.1:9445/metrics'
- - name: orange_livebox_dsl_modem_exporter_local
- url: 'http://127.0.0.1:9446/metrics'
- - name: resque_exporter_local
- url: 'http://127.0.0.1:9447/metrics'
- - name: eventstore_exporter_local
- url: 'http://127.0.0.1:9448/metrics'
- - name: omeroserver_exporter_local
- url: 'http://127.0.0.1:9449/metrics'
- - name: habitat_exporter_local
- url: 'http://127.0.0.1:9450/metrics'
- - name: reindexer_exporter_local
- url: 'http://127.0.0.1:9451/metrics'
- - name: freebsd_jail_exporter_local
- url: 'http://127.0.0.1:9452/metrics'
- - name: midonet-kubernetes_local
- url: 'http://127.0.0.1:9453/metrics'
- - name: nvidia_smi_exporter_local
- url: 'http://127.0.0.1:9454/metrics'
- - name: iptables_exporter_local
- url: 'http://127.0.0.1:9455/metrics'
- - name: aws_lambda_exporter_local
- url: 'http://127.0.0.1:9456/metrics'
- - name: files_content_exporter_local
- url: 'http://127.0.0.1:9457/metrics'
- - name: rocketchat_exporter_local
- url: 'http://127.0.0.1:9458/metrics'
- - name: yarn_exporter_local
- url: 'http://127.0.0.1:9459/metrics'
- - name: hana_exporter_local
- url: 'http://127.0.0.1:9460/metrics'
- - name: aws_lambda_read_adapter_local
- url: 'http://127.0.0.1:9461/metrics'
- - name: php_opcache_exporter_local
- url: 'http://127.0.0.1:9462/metrics'
- - name: virgin_media_liberty_global_hub3_exporter_local
- url: 'http://127.0.0.1:9463/metrics'
- - name: opencensus-nodejs_prometheus_exporter_local
- url: 'http://127.0.0.1:9464/metrics'
- - name: hetzner_cloud_k8s_cloud_controller_manager_local
- url: 'http://127.0.0.1:9465/metrics'
- - name: mqtt_push_gateway_local
- url: 'http://127.0.0.1:9466/metrics'
- - name: nginx-prometheus-shiny-exporter_local
- url: 'http://127.0.0.1:9467/metrics'
- - name: nasa-swpc-exporter_local
- url: 'http://127.0.0.1:9468/metrics'
- - name: script_exporter_local
- url: 'http://127.0.0.1:9469/metrics'
- - name: cachet_exporter_local
- url: 'http://127.0.0.1:9470/metrics'
- - name: lxc-exporter_local
- url: 'http://127.0.0.1:9471/metrics'
- - name: hetzner_cloud_csi_driver_controller_local
- url: 'http://127.0.0.1:9472/metrics'
- - name: stellar-core-exporter_local
- url: 'http://127.0.0.1:9473/metrics'
- - name: libvirtd_exporter_local
- url: 'http://127.0.0.1:9474/metrics'
- - name: wgipamd_local
- url: 'http://127.0.0.1:9475/metrics'
- - name: ovn_metrics_exporter_local
- url: 'http://127.0.0.1:9476/metrics'
- - name: csp_violation_report_exporter_local
- url: 'http://127.0.0.1:9477/metrics'
- - name: sentinel_exporter_local
- url: 'http://127.0.0.1:9478/metrics'
- - name: elasticbeat_exporter_local
- url: 'http://127.0.0.1:9479/metrics'
- - name: brigade_exporter_local
- url: 'http://127.0.0.1:9480/metrics'
- - name: drbd9_exporter_local
- url: 'http://127.0.0.1:9481/metrics'
- - name: vector_packet_process_vpp_exporter_local
- url: 'http://127.0.0.1:9482/metrics'
- - name: ibm_app_connect_enterprise_exporter_local
- url: 'http://127.0.0.1:9483/metrics'
- - name: kubedex-exporter_local
- url: 'http://127.0.0.1:9484/metrics'
- - name: emarsys_exporter_local
- url: 'http://127.0.0.1:9485/metrics'
- - name: domoticz_exporter_local
- url: 'http://127.0.0.1:9486/metrics'
- - name: docker_stats_exporter_local
- url: 'http://127.0.0.1:9487/metrics'
- - name: bmw_connected_drive_exporter_local
- url: 'http://127.0.0.1:9488/metrics'
- - name: tezos_node_metrics_exporter_local
- url: 'http://127.0.0.1:9489/metrics'
- - name: exporter_for_docker_libnetwork_plugin_for_ovn_local
- url: 'http://127.0.0.1:9490/metrics'
- - name: docker_container_stats_exporter_docker_ps_local
- url: 'http://127.0.0.1:9491/metrics'
- - name: azure_exporter_monitor_and_usage_local
- url: 'http://127.0.0.1:9492/metrics'
- - name: prosafe_exporter_local
- url: 'http://127.0.0.1:9493/metrics'
- - name: kamailio_exporter_local
- url: 'http://127.0.0.1:9494/metrics'
- - name: ingestor_exporter_local
- url: 'http://127.0.0.1:9495/metrics'
- - name: 389ds_ipa_exporter_local
- url: 'http://127.0.0.1:9496/metrics'
- - name: immudb_exporter_local
- url: 'http://127.0.0.1:9497/metrics'
- - name: tp-link_hs110_exporter_local
- url: 'http://127.0.0.1:9498/metrics'
- - name: smartthings_exporter_local
- url: 'http://127.0.0.1:9499/metrics'
- - name: cassandra_exporter_local
- url: 'http://127.0.0.1:9500/metrics'
- - name: hetznercloud_exporter_local
- url: 'http://127.0.0.1:9501/metrics'
- - name: hetzner_exporter_local
- url: 'http://127.0.0.1:9502/metrics'
- - name: scaleway_exporter_local
- url: 'http://127.0.0.1:9503/metrics'
- - name: github_exporter_local
- url: 'http://127.0.0.1:9504/metrics'
- - name: dockerhub_exporter_local
- url: 'http://127.0.0.1:9505/metrics'
- - name: jenkins_exporter_local
- url: 'http://127.0.0.1:9506/metrics'
- - name: owncloud_exporter_local
- url: 'http://127.0.0.1:9507/metrics'
- - name: ccache_exporter_local
- url: 'http://127.0.0.1:9508/metrics'
- - name: hetzner_storagebox_exporter_local
- url: 'http://127.0.0.1:9509/metrics'
- - name: dummy_exporter_local
- url: 'http://127.0.0.1:9510/metrics'
- - name: cloudera_exporter_local
- url: 'http://127.0.0.1:9512/metrics'
- - name: openconfig_streaming_telemetry_exporter_local
- url: 'http://127.0.0.1:9513/metrics'
- - name: app_stores_exporter_local
- url: 'http://127.0.0.1:9514/metrics'
- - name: swarm-exporter_local
- url: 'http://127.0.0.1:9515/metrics'
- - name: prometheus_speedtest_exporter_local
- url: 'http://127.0.0.1:9516/metrics'
- - name: matroschka_prober_local
- url: 'http://127.0.0.1:9517/metrics'
- - name: crypto_stock_exchanges_funds_exporter_local
- url: 'http://127.0.0.1:9518/metrics'
- - name: acurite_exporter_local
- url: 'http://127.0.0.1:9519/metrics'
- - name: swift_health_exporter_local
- url: 'http://127.0.0.1:9520/metrics'
- - name: ruuvi_exporter_local
- url: 'http://127.0.0.1:9521/metrics'
- - name: tftp_exporter_local
- url: 'http://127.0.0.1:9522/metrics'
- - name: 3cx_exporter_local
- url: 'http://127.0.0.1:9523/metrics'
- - name: loki_exporter_local
- url: 'http://127.0.0.1:9524/metrics'
- - name: alibaba_cloud_exporter_local
- url: 'http://127.0.0.1:9525/metrics'
- - name: kafka_lag_exporter_local
- url: 'http://127.0.0.1:9526/metrics'
- - name: netgear_cable_modem_exporter_local
- url: 'http://127.0.0.1:9527/metrics'
- - name: total_connect_comfort_exporter_local
- url: 'http://127.0.0.1:9528/metrics'
- - name: octoprint_exporter_local
- url: 'http://127.0.0.1:9529/metrics'
- - name: custom_prometheus_exporter_local
- url: 'http://127.0.0.1:9530/metrics'
- - name: jfrog_artifactory_exporter_local
- url: 'http://127.0.0.1:9531/metrics'
- - name: snyk_exporter_local
- url: 'http://127.0.0.1:9532/metrics'
- - name: network_exporter_for_cisco_api_local
- url: 'http://127.0.0.1:9533/metrics'
- - name: humio_exporter_local
- url: 'http://127.0.0.1:9534/metrics'
- - name: cron_exporter_local
- url: 'http://127.0.0.1:9535/metrics'
- - name: ipsec_exporter_local
- url: 'http://127.0.0.1:9536/metrics'
- - name: cri-o_local
- url: 'http://127.0.0.1:9537/metrics'
- - name: bull_queue_local
- url: 'http://127.0.0.1:9538/metrics'
- - name: modemmanager_exporter_local
- url: 'http://127.0.0.1:9539/metrics'
- - name: emq_exporter_local
- url: 'http://127.0.0.1:9540/metrics'
- - name: smartmon_exporter_local
- url: 'http://127.0.0.1:9541/metrics'
- - name: sakuracloud_exporter_local
- url: 'http://127.0.0.1:9542/metrics'
- - name: kube2iam_exporter_local
- url: 'http://127.0.0.1:9543/metrics'
- - name: pgio_exporter_local
- url: 'http://127.0.0.1:9544/metrics'
- - name: hp_ilo4_exporter_local
- url: 'http://127.0.0.1:9545/metrics'
- - name: pwrstat-exporter_local
- url: 'http://127.0.0.1:9546/metrics'
- - name: patroni_exporter_local
- url: 'http://127.0.0.1:9547/metrics'
- - name: trafficserver_exporter_local
- url: 'http://127.0.0.1:9548/metrics'
- - name: raspberry_exporter_local
- url: 'http://127.0.0.1:9549/metrics'
- - name: rtl_433_exporter_local
- url: 'http://127.0.0.1:9550/metrics'
- - name: hostapd_exporter_local
- url: 'http://127.0.0.1:9551/metrics'
- - name: alpine_apk_exporter_local
- url: 'http://127.0.0.1:9552/metrics'
- - name: aws_elastic_beanstalk_exporter_local
- url: 'http://127.0.0.1:9552/metrics'
- - name: apt_exporter_local
- url: 'http://127.0.0.1:9553/metrics'
- - name: acc_server_manager_exporter_local
- url: 'http://127.0.0.1:9554/metrics'
- - name: sona_exporter_local
- url: 'http://127.0.0.1:9555/metrics'
- - name: routinator_exporter_local
- url: 'http://127.0.0.1:9556/metrics'
- - name: mysql_count_exporter_local
- url: 'http://127.0.0.1:9557/metrics'
- - name: systemd_exporter_local
- url: 'http://127.0.0.1:9558/metrics'
- - name: ntp_exporter_local
- url: 'http://127.0.0.1:9559/metrics'
- - name: sql_queries_exporter_local
- url: 'http://127.0.0.1:9560/metrics'
- - name: qbittorrent_exporter_local
- url: 'http://127.0.0.1:9561/metrics'
- - name: ptv_xserver_exporter_local
- url: 'http://127.0.0.1:9562/metrics'
- - name: kibana_exporter_local
- url: 'http://127.0.0.1:9563/metrics'
- - name: purpleair_exporter_local
- url: 'http://127.0.0.1:9564/metrics'
- - name: bminer_exporter_local
- url: 'http://127.0.0.1:9565/metrics'
- - name: rabbitmq_cli_consumer_local
- url: 'http://127.0.0.1:9566/metrics'
- - name: alertsnitch_local
- url: 'http://127.0.0.1:9567/metrics'
- - name: dell_poweredge_ipmi_exporter_local
- url: 'http://127.0.0.1:9568/metrics'
- - name: hvpa_controller_local
- url: 'http://127.0.0.1:9569/metrics'
- - name: vpa_exporter_local
- url: 'http://127.0.0.1:9570/metrics'
- - name: helm_exporter_local
- url: 'http://127.0.0.1:9571/metrics'
- - name: ctld_exporter_local
- url: 'http://127.0.0.1:9572/metrics'
- - name: jkstatus_exporter_local
- url: 'http://127.0.0.1:9573/metrics'
- - name: opentracker_exporter_local
- url: 'http://127.0.0.1:9574/metrics'
- - name: poweradmin_server_monitor_exporter_local
- url: 'http://127.0.0.1:9575/metrics'
- - name: exabgp_exporter_local
- url: 'http://127.0.0.1:9576/metrics'
- - name: aria2_exporter_local
- url: 'http://127.0.0.1:9578/metrics'
- - name: iperf3_exporter_local
- url: 'http://127.0.0.1:9579/metrics'
- - name: azure_service_bus_exporter_local
- url: 'http://127.0.0.1:9580/metrics'
- - name: codenotary_vcn_exporter_local
- url: 'http://127.0.0.1:9581/metrics'
- - name: signatory_a_remote_operation_signer_for_tezos_local
- url: 'http://127.0.0.1:9583/metrics'
- - name: bunnycdn_exporter_local
- url: 'http://127.0.0.1:9584/metrics'
- - name: opvizor_performance_analyzer_process_exporter_local
- url: 'http://127.0.0.1:9585/metrics'
- - name: nfs-ganesha_exporter_local
- url: 'http://127.0.0.1:9587/metrics'
- - name: ltsv-tailer_exporter_local
- url: 'http://127.0.0.1:9588/metrics'
- - name: goflow_exporter_local
- url: 'http://127.0.0.1:9589/metrics'
- - name: flow_exporter_local
- url: 'http://127.0.0.1:9590/metrics'
- - name: srcds_exporter_local
- url: 'http://127.0.0.1:9591/metrics'
- - name: gcp_quota_exporter_local
- url: 'http://127.0.0.1:9592/metrics'
- - name: lighthouse_exporter_local
- url: 'http://127.0.0.1:9593/metrics'
- - name: plex_exporter_local
- url: 'http://127.0.0.1:9594/metrics'
- - name: netio_exporter_local
- url: 'http://127.0.0.1:9595/metrics'
- - name: azure_elastic_sql_exporter_local
- url: 'http://127.0.0.1:9596/metrics'
- - name: github_vulnerability_alerts_exporter_local
- url: 'http://127.0.0.1:9597/metrics'
- - name: pirograph_exporter_local
- url: 'http://127.0.0.1:9599/metrics'
- - name: circleci_exporter_local
- url: 'http://127.0.0.1:9600/metrics'
- - name: messagebird_exporter_local
- url: 'http://127.0.0.1:9601/metrics'
- - name: modbus_exporter_local
- url: 'http://127.0.0.1:9602/metrics'
- - name: xen_exporter_using_xenlight_local
- url: 'http://127.0.0.1:9603/metrics'
- - name: xmpp_blackbox_exporter_local
- url: 'http://127.0.0.1:9604/metrics'
- - name: fping-exporter_local
- url: 'http://127.0.0.1:9605/metrics'
- - name: ecr-exporter_local
- url: 'http://127.0.0.1:9606/metrics'
- - name: raspberry_pi_sense_hat_exporter_local
- url: 'http://127.0.0.1:9607/metrics'
- - name: ironic_prometheus_exporter_local
- url: 'http://127.0.0.1:9608/metrics'
- - name: netapp_exporter_local
- url: 'http://127.0.0.1:9609/metrics'
- - name: kubernetes_exporter_local
- url: 'http://127.0.0.1:9610/metrics'
- - name: speedport_exporter_local
- url: 'http://127.0.0.1:9611/metrics'
- - name: opflex-agent_exporter_local
- url: 'http://127.0.0.1:9612/metrics'
- - name: azure_health_exporter_local
- url: 'http://127.0.0.1:9613/metrics'
- - name: nut_upsc_exporter_local
- url: 'http://127.0.0.1:9614/metrics'
- - name: mellanox_mlx5_exporter_local
- url: 'http://127.0.0.1:9615/metrics'
- - name: mailgun_exporter_local
- url: 'http://127.0.0.1:9616/metrics'
- - name: pi-hole_exporter_local
- url: 'http://127.0.0.1:9617/metrics'
- - name: stellar-account-exporter_local
- url: 'http://127.0.0.1:9618/metrics'
- - name: stellar-horizon-exporter_local
- url: 'http://127.0.0.1:9619/metrics'
- - name: rundeck_exporter_local
- url: 'http://127.0.0.1:9620/metrics'
- - name: opennebula_exporter_local
- url: 'http://127.0.0.1:9621/metrics'
- - name: bmc_exporter_local
- url: 'http://127.0.0.1:9622/metrics'
- - name: tc4400_exporter_local
- url: 'http://127.0.0.1:9623/metrics'
- - name: pact_broker_exporter_local
- url: 'http://127.0.0.1:9624/metrics'
- - name: bareos_exporter_local
- url: 'http://127.0.0.1:9625/metrics'
- - name: hockeypuck_local
- url: 'http://127.0.0.1:9626/metrics'
- - name: artifactory_exporter_local
- url: 'http://127.0.0.1:9627/metrics'
- - name: solace_pubsub_plus_exporter_local
- url: 'http://127.0.0.1:9628/metrics'
- - name: prometheus_gitlab_notifier_local
- url: 'http://127.0.0.1:9629/metrics'
- - name: nftables_exporter_local
- url: 'http://127.0.0.1:9630/metrics'
- - name: a_op5_monitor_exporter_local
- url: 'http://127.0.0.1:9631/metrics'
- - name: opflex-server_exporter_local
- url: 'http://127.0.0.1:9632/metrics'
- - name: smartctl_exporter_local
- url: 'http://127.0.0.1:9633/metrics'
- - name: aerospike_ttl_exporter_local
- url: 'http://127.0.0.1:9634/metrics'
- - name: fail2ban_exporter_local
- url: 'http://127.0.0.1:9635/metrics'
- - name: exim4_exporter_local
- url: 'http://127.0.0.1:9636/metrics'
- - name: kubeversion_exporter_local
- url: 'http://127.0.0.1:9637/metrics'
- - name: a_icinga2_exporter_local
- url: 'http://127.0.0.1:9638/metrics'
- - name: scriptable_jmx_exporter_local
- url: 'http://127.0.0.1:9639/metrics'
- - name: logstash_output_exporter_local
- url: 'http://127.0.0.1:9640/metrics'
- - name: coturn_exporter_local
- url: 'http://127.0.0.1:9641/metrics'
- - name: bugsnag_exporter_local
- url: 'http://127.0.0.1:9642/metrics'
- - name: exporter_for_grouped_process_local
- url: 'http://127.0.0.1:9644/metrics'
- - name: burp_exporter_local
- url: 'http://127.0.0.1:9645/metrics'
- - name: locust_exporter_local
- url: 'http://127.0.0.1:9646/metrics'
- - name: docker_exporter_local
- url: 'http://127.0.0.1:9647/metrics'
- - name: ntpmon_exporter_local
- url: 'http://127.0.0.1:9648/metrics'
- - name: logstash_exporter_local
- url: 'http://127.0.0.1:9649/metrics'
- - name: keepalived_exporter_local
- url: 'http://127.0.0.1:9650/metrics'
- - name: storj_exporter_local
- url: 'http://127.0.0.1:9651/metrics'
- - name: praefect_exporter_local
- url: 'http://127.0.0.1:9652/metrics'
- - name: jira_issues_exporter_local
- url: 'http://127.0.0.1:9653/metrics'
- - name: ansible_galaxy_exporter_local
- url: 'http://127.0.0.1:9654/metrics'
- - name: kube-netc_exporter_local
- url: 'http://127.0.0.1:9655/metrics'
- - name: matrix_local
- url: 'http://127.0.0.1:9656/metrics'
- - name: krill_exporter_local
- url: 'http://127.0.0.1:9657/metrics'
- - name: sap_hana_sql_exporter_local
- url: 'http://127.0.0.1:9658/metrics'
- - name: kaiterra_laser_egg_exporter_local
- url: 'http://127.0.0.1:9660/metrics'
- - name: hashpipe_exporter_local
- url: 'http://127.0.0.1:9661/metrics'
- - name: pms5003_particulate_matter_sensor_exporter_local
- url: 'http://127.0.0.1:9662/metrics'
- - name: sap_nwrfc_exporter_local
- url: 'http://127.0.0.1:9663/metrics'
- - name: linux_ha_clusterlabs_exporter_local
- url: 'http://127.0.0.1:9664/metrics'
- - name: senderscore_exporter_local
- url: 'http://127.0.0.1:9665/metrics'
- - name: alertmanager_silences_exporter_local
- url: 'http://127.0.0.1:9666/metrics'
- - name: smtpd_exporter_local
- url: 'http://127.0.0.1:9667/metrics'
- - name: suses_sap_hana_exporter_local
- url: 'http://127.0.0.1:9668/metrics'
- - name: panopticon_native_metrics_local
- url: 'http://127.0.0.1:9669/metrics'
- - name: flare_native_metrics_local
- url: 'http://127.0.0.1:9670/metrics'
- - name: aws_ec2_spot_exporter_local
- url: 'http://127.0.0.1:9671/metrics'
- - name: aircontrol_co2_exporter_local
- url: 'http://127.0.0.1:9672/metrics'
- - name: co2_monitor_exporter_local
- url: 'http://127.0.0.1:9673/metrics'
- - name: google_analytics_exporter_local
- url: 'http://127.0.0.1:9674/metrics'
- - name: docker_swarm_exporter_local
- url: 'http://127.0.0.1:9675/metrics'
- - name: hetzner_traffic_exporter_local
- url: 'http://127.0.0.1:9676/metrics'
- - name: aws_ecs_exporter_local
- url: 'http://127.0.0.1:9677/metrics'
- - name: ircd_user_exporter_local
- url: 'http://127.0.0.1:9678/metrics'
- - name: aws_health_exporter_local
- url: 'http://127.0.0.1:9679/metrics'
- - name: suses_sap_host_exporter_local
- url: 'http://127.0.0.1:9680/metrics'
- - name: myfitnesspal_exporter_local
- url: 'http://127.0.0.1:9681/metrics'
- - name: powder_monkey_local
- url: 'http://127.0.0.1:9682/metrics'
- - name: infiniband_exporter_local
- url: 'http://127.0.0.1:9683/metrics'
- - name: kibana_standalone_exporter_local
- url: 'http://127.0.0.1:9684/metrics'
- - name: eideticom_local
- url: 'http://127.0.0.1:9685/metrics'
- - name: aws_ec2_exporter_local
- url: 'http://127.0.0.1:9686/metrics'
- - name: gitaly_blackbox_exporter_local
- url: 'http://127.0.0.1:9687/metrics'
- - name: lan_server_modbus_exporter_local
- url: 'http://127.0.0.1:9689/metrics'
- - name: tcp_longterm_connection_exporter_local
- url: 'http://127.0.0.1:9690/metrics'
- - name: celery_redis_exporter_local
- url: 'http://127.0.0.1:9691/metrics'
- - name: gcp_gce_exporter_local
- url: 'http://127.0.0.1:9692/metrics'
- - name: sigma_air_manager_exporter_local
- url: 'http://127.0.0.1:9693/metrics'
- - name: per-user_usage_exporter_for_cisco_xe_lnss_local
- url: 'http://127.0.0.1:9694/metrics'
- - name: cifs_exporter_local
- url: 'http://127.0.0.1:9695/metrics'
- - name: jitsi_videobridge_exporter_local
- url: 'http://127.0.0.1:9696/metrics'
- - name: tendermint_blockchain_exporter_local
- url: 'http://127.0.0.1:9697/metrics'
- - name: integrated_dell_remote_access_controller_idrac_exporter_local
- url: 'http://127.0.0.1:9698/metrics'
- - name: pyncette_exporter_local
- url: 'http://127.0.0.1:9699/metrics'
- - name: jitsi_meet_exporter_local
- url: 'http://127.0.0.1:9700/metrics'
- - name: workbook_exporter_local
- url: 'http://127.0.0.1:9701/metrics'
- - name: homeplug_plc_exporter_local
- url: 'http://127.0.0.1:9702/metrics'
- - name: vircadia_local
- url: 'http://127.0.0.1:9703/metrics'
- - name: linux_tc_exporter_local
- url: 'http://127.0.0.1:9704/metrics'
- - name: upc_connect_box_exporter_local
- url: 'http://127.0.0.1:9705/metrics'
- - name: postfix_exporter_local
- url: 'http://127.0.0.1:9706/metrics'
- - name: radarr_exporter_local
- url: 'http://127.0.0.1:9707/metrics'
- - name: sonarr_exporter_local
- url: 'http://127.0.0.1:9708/metrics'
- - name: hadoop_hdfs_fsimage_exporter_local
- url: 'http://127.0.0.1:9709/metrics'
- - name: nut-exporter_local
- url: 'http://127.0.0.1:9710/metrics'
- - name: cloudflare_flan_scan_report_exporter_local
- url: 'http://127.0.0.1:9711/metrics'
- - name: siemens_s7_plc_exporter_local
- url: 'http://127.0.0.1:9712/metrics'
- - name: glusterfs_exporter_local
- url: 'http://127.0.0.1:9713/metrics'
- - name: fritzbox_exporter_local
- url: 'http://127.0.0.1:9714/metrics'
- - name: twincat_ads_web_service_exporter_local
- url: 'http://127.0.0.1:9715/metrics'
- - name: signald_webhook_receiver_local
- url: 'http://127.0.0.1:9716/metrics'
- - name: tplink_easysmart_switch_exporter_local
- url: 'http://127.0.0.1:9717/metrics'
- - name: warp10_exporter_local
- url: 'http://127.0.0.1:9718/metrics'
- - name: pgpool-ii_exporter_local
- url: 'http://127.0.0.1:9719/metrics'
- - name: moodle_db_exporter_local
- url: 'http://127.0.0.1:9720/metrics'
- - name: gtp_exporter_local
- url: 'http://127.0.0.1:9721/metrics'
- - name: miele_exporter_local
- url: 'http://127.0.0.1:9722/metrics'
- - name: freeswitch_exporter_local
- url: 'http://127.0.0.1:9724/metrics'
- - name: sunnyboy_exporter_local
- url: 'http://127.0.0.1:9725/metrics'
- - name: python_rq_exporter_local
- url: 'http://127.0.0.1:9726/metrics'
- - name: ctdb_exporter_local
- url: 'http://127.0.0.1:9727/metrics'
- - name: nginx-rtmp_exporter_local
- url: 'http://127.0.0.1:9728/metrics'
- - name: libvirtd_exporter_local
- url: 'http://127.0.0.1:9729/metrics'
- - name: lynis_exporter_local
- url: 'http://127.0.0.1:9730/metrics'
- - name: nebula_mam_exporter_local
- url: 'http://127.0.0.1:9731/metrics'
- - name: nftables_exporter_local
- url: 'http://127.0.0.1:9732/metrics'
- - name: honeypot_exporter_local
- url: 'http://127.0.0.1:9733/metrics'
- - name: a10-networks_prometheus_exporter_local
- url: 'http://127.0.0.1:9734/metrics'
- - name: webweaver_local
- url: 'http://127.0.0.1:9735/metrics'
- - name: mongodb_query_exporter_local
- url: 'http://127.0.0.1:9736/metrics'
- - name: folding_home_exporter_local
- url: 'http://127.0.0.1:9737/metrics'
- - name: processor_counter_monitor_exporter_local
- url: 'http://127.0.0.1:9738/metrics'
- - name: kafka_consumer_lag_monitoring_local
- url: 'http://127.0.0.1:9739/metrics'
- - name: flightdeck_local
- url: 'http://127.0.0.1:9740/metrics'
- - name: ibm_spectrum_exporter_local
- url: 'http://127.0.0.1:9741/metrics'
- - name: transmission-exporter_local
- url: 'http://127.0.0.1:9742/metrics'
- - name: sma-exporter_local
- url: 'http://127.0.0.1:9743/metrics'
- - name: site24x7_exporter_local
- url: 'http://127.0.0.1:9803/metrics'
- - name: envoy_proxy_local
- url: 'http://127.0.0.1:9901/metrics'
- - name: nginx_vts_exporter_local
- url: 'http://127.0.0.1:9913/metrics'
- - name: login_exporter_local
- url: 'http://127.0.0.1:9980/metrics'
- - name: filestat_exporter_local
- url: 'http://127.0.0.1:9943/metrics'
- - name: sia_exporter_local
- url: 'http://127.0.0.1:9983/metrics'
- - name: couchdb_exporter_local
- url: 'http://127.0.0.1:9984/metrics'
- - name: netapp_solidfire_exporter_local
- url: 'http://127.0.0.1:9987/metrics'
- - name: wildfly_exporter_local
- url: 'http://127.0.0.1:9990/metrics'
- - name: prometheus-jdbc-exporter_local
- url: 'http://127.0.0.1:5555/metrics'
- - name: midonet_agent_local
- url: 'http://127.0.0.1:7300/metrics'
- - name: traefik_local
- url: 'http://127.0.0.1:8080/metrics'
- expected_prefix: 'traefik_'
- - name: trickster_local
- url: 'http://127.0.0.1:8082/metrics'
- - name: fawkes_local
- url: 'http://127.0.0.1:8088/metrics'
- - name: prom2teams_local
- url: 'http://127.0.0.1:8089/metrics'
- - name: phabricator_webhook_for_alertmanager_local
- url: 'http://127.0.0.1:8292/metrics'
- - name: ha_proxy_v2_plus_local
- url: 'http://127.0.0.1:8404/metrics'
- - name: rds_exporter_local
- url: 'http://127.0.0.1:9042/metrics'
- - name: telegram_bot_for_alertmanager_local
- url: 'http://127.0.0.1:9087/metrics'
- - name: jiralert_local
- url: 'http://127.0.0.1:9097/metrics'
- - name: storidge_exporter_local
- url: 'http://127.0.0.1:16995/metrics'
- - name: transmission_exporter_local
- url: 'http://127.0.0.1:19091/metrics'
- - name: fluent_plugin_for_prometheus_local
- url: 'http://127.0.0.1:24231/metrics'
- - name: proxysql_exporter_local
- url: 'http://127.0.0.1:42004/metrics'
- - name: pcp_exporter_local
- url: 'http://127.0.0.1:44323/metrics'
- - name: dcos_exporter_local
- url: 'http://127.0.0.1:61091/metrics'
- - name: caddy_local
- url: 'http://localhost:2019/metrics'
- expected_prefix: 'caddy_'
- # Run Geth with --metrics flag.
- # Docs: https://geth.ethereum.org/docs/interface/metrics
- - name: geth_local
- url: 'http://127.0.0.1:6060/debug/metrics/prometheus'
- expected_prefix: 'eth_'
- # Run OpenEthereum with --metrics flag.
- # Docs: https://openethereum.github.io/Configuring-OpenEthereum.html?q=metrics-interface
- - name: openethereum_local
- url: 'http://127.0.0.1:3000/metrics'
- expected_prefix: 'blockchaincache_'
- - name: pushgateway_local
- url: 'http://127.0.0.1:9091/metrics'
- expected_prefix: 'pushgateway_'
- selector:
- allow:
- - pushgateway_*
- # Run Nethermind with --Metrics.Enabled true.
- # Docs: https://docs.nethermind.io/nethermind/ethereum-client/metrics/setting-up-local-metrics-infrastracture
- - name: nethermind_local
- url: 'http://127.0.0.1:9091/metrics'
- expected_prefix: 'nethermind_'
- selector:
- allow:
- - nethermind*
- # Run Besu with --metrics-enabled flag.
- # Docs: https://besu.hyperledger.org/en/stable/HowTo/Monitor/Metrics/
- - name: besu_local
- url: '127.0.0.1:9545'
- expected_prefix: 'besu_'
- - name: crowdsec_local
- url: http://127.0.0.1:6060/metrics
- expected_prefix: 'cs_'
+#jobs:
+# - name: node_exporter_local
+# url: 'http://127.0.0.1:9100/metrics'
diff --git a/config/go.d/proxysql.conf b/config/go.d/proxysql.conf
index 100f21384..8963ed267 100644
--- a/config/go.d/proxysql.conf
+++ b/config/go.d/proxysql.conf
@@ -1,19 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/proxysql
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- # my.cnf
- - name: local
- my.cnf: '/etc/my.cnf'
-
- # stats
- - name: local
- dsn: stats:stats@tcp(127.0.0.1:6032)/
-
- - name: local
- dsn: stats:stats@tcp([::1]:6032)/
-
+#jobs:
+# - name: local
+# dsn: stats:stats@tcp(127.0.0.1:6032)/
diff --git a/config/go.d/pulsar.conf b/config/go.d/pulsar.conf
index 147c8e184..7636eb741 100644
--- a/config/go.d/pulsar.conf
+++ b/config/go.d/pulsar.conf
@@ -1,11 +1,7 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/pulsar
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8080/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8080/metrics
diff --git a/config/go.d/rabbitmq.conf b/config/go.d/rabbitmq.conf
index 9b1db9f5c..580d2ae6b 100644
--- a/config/go.d/rabbitmq.conf
+++ b/config/go.d/rabbitmq.conf
@@ -1,19 +1,9 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/rabbitmq
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:15672
- username: guest
- password: guest
- collect_queues_metrics: no
-
- - name: local
- url: http://127.0.0.1:15672
- username: guest
- password: guest
- collect_queues_metrics: no
+#jobs:
+# - name: local
+# url: http://localhost:15672
+# username: guest
+# password: guest
+# collect_queues_metrics: no
diff --git a/config/go.d/scaleio.conf b/config/go.d/scaleio.conf
index 7206bab81..cb25d03b1 100644
--- a/config/go.d/scaleio.conf
+++ b/config/go.d/scaleio.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio
-#update_every : 1
-#autodetection_retry : 0
-#priority : 70000
-
#jobs:
# - name : local
# url : https://127.0.0.1
diff --git a/config/go.d/sd/net_listeners.yaml b/config/go.d/sd/net_listeners.yaml
new file mode 100644
index 000000000..1625cd755
--- /dev/null
+++ b/config/go.d/sd/net_listeners.yaml
@@ -0,0 +1,129 @@
+name: 'network listeners'
+
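+# A three-stage pipeline: 'discover' lists local listening sockets,
+# 'classify' tags the ones that look like known applications, and
+# 'compose' renders a collector job config for each tagged target.
+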
+discover:
+ net_listeners:
+ tags: "listener"
+
+classify:
+ - name: "Applications"
+ selector: "listener"
+ tags: "app"
+ match:
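+ # Each expression is a Go template evaluated against a discovered
+ # listener: .Port is the listening port, .Comm the process name, and
+ # .Cmdline the full command line. 'eq' with several values matches any
+ # of them; 'glob' matches shell-style patterns.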
+ - tags: "activemq"
+ expr: '{{ and (eq .Port "8161") (eq .Comm "activemq") }}'
+ - tags: "apache"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "apache" "httpd") }}'
+ - tags: "bind"
+ expr: '{{ and (eq .Port "8653") (eq .Comm "bind" "named") }}'
+ - tags: "cassandra"
+ expr: '{{ and (eq .Port "7072") (glob .Cmdline "*cassandra*") }}'
+ - tags: "chrony"
+ expr: '{{ and (eq .Port "323") (eq .Comm "chronyd") }}'
+ - tags: "cockroachdb"
+ expr: '{{ and (eq .Port "8080") (eq .Comm "cockroach") }}'
+ - tags: "consul"
+ expr: '{{ and (eq .Port "8500") (eq .Comm "consul") }}'
+ - tags: "coredns"
+ expr: '{{ and (eq .Port "9153") (eq .Comm "coredns") }}'
+ - tags: "couchbase"
+ expr: '{{ and (eq .Port "8091") (glob .Cmdline "*couchbase*") }}'
+ - tags: "couchdb"
+ expr: '{{ and (eq .Port "5984") (glob .Cmdline "*couchdb*") }}'
+ - tags: "dnsdist"
+ expr: '{{ and (eq .Port "8083") (eq .Comm "dnsdist") }}'
+ - tags: "dnsmasq"
+ expr: '{{ and (eq .Port "53") (eq .Comm "dnsmasq") }}'
+ - tags: "docker_engine"
+ expr: '{{ and (eq .Port "9323") (eq .Comm "dockerd") }}'
+ - tags: "elasticsearch"
+ expr: '{{ and (eq .Port "9200") (glob .Cmdline "*elasticsearch*") }}'
+ - tags: "opensearch"
+ expr: '{{ and (eq .Port "9200") (glob .Cmdline "*opensearch*") }}'
+ - tags: "envoy"
+ expr: '{{ and (eq .Port "9901") (eq .Comm "envoy") }}'
+ - tags: "fluentd"
+ expr: '{{ and (eq .Port "24220") (glob .Cmdline "*fluentd*") }}'
+ - tags: "freeradius"
+ expr: '{{ and (eq .Port "18121") (eq .Comm "freeradius") }}'
+ - tags: "geth"
+ expr: '{{ and (eq .Port "6060") (eq .Comm "geth") }}'
+ - tags: "haproxy"
+ expr: '{{ and (eq .Port "8404") (eq .Comm "haproxy") }}'
+ - tags: "hdfs_namenode"
+ expr: '{{ and (eq .Port "9870") (eq .Comm "hadoop") }}'
+ - tags: "hdfs_datanode"
+ expr: '{{ and (eq .Port "9864") (eq .Comm "hadoop") }}'
+ - tags: "kubelet"
+ expr: '{{ and (eq .Port "10250" "10255") (eq .Comm "kubelet") }}'
+ - tags: "kubeproxy"
+ expr: '{{ and (eq .Port "10249") (eq .Comm "kube-proxy") }}'
+ - tags: "lighttpd"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "lighttpd") }}'
+ - tags: "logstash"
+ expr: '{{ and (eq .Port "9600") (glob .Cmdline "*logstash*") }}'
+ - tags: "mongodb"
+ expr: '{{ and (eq .Port "27017") (eq .Comm "mongod") }}'
+ - tags: "mysql"
+ expr: '{{ and (eq .Port "3306") (eq .Comm "mysqld" "mariadb") }}'
+ - tags: "nginx"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "nginx") }}'
+ - tags: "ntpd"
+ expr: '{{ and (eq .Port "123") (eq .Comm "ntpd") }}'
+ - tags: "openvpn"
+ expr: '{{ and (eq .Port "7505") (eq .Comm "openvpn") }}'
+ - tags: "pgbouncer"
+ expr: '{{ and (eq .Port "6432") (eq .Comm "pgbouncer") }}'
+ - tags: "pihole"
+ expr: '{{ and (eq .Port "53") (eq .Comm "pihole-FTL") }}'
+ - tags: "pika"
+ expr: '{{ and (eq .Port "9221") (eq .Comm "pika") }}'
+ - tags: "postgres"
+ expr: '{{ and (eq .Port "5432") (eq .Comm "postgres") }}'
+ - tags: "powerdns"
+ expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_server") }}'
+ - tags: "powerdns_recursor"
+ expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_recursor") }}'
+ - tags: "proxysql"
+ expr: '{{ and (eq .Port "6032") (eq .Comm "proxysql") }}'
+ - tags: "rabbitmq"
+ expr: '{{ and (eq .Port "15672") (glob .Cmdline "*rabbitmq*") }}'
+ - tags: "redis"
+ expr: '{{ and (eq .Port "6379") (eq .Comm "redis-server") }}'
+ - tags: "supervisord"
+ expr: '{{ and (eq .Port "9001") (eq .Comm "supervisord") }}'
+ - tags: "traefik"
+ expr: '{{ and (eq .Port "80" "8080") (eq .Comm "traefik") }}'
+ - tags: "unbound"
+ expr: '{{ and (eq .Port "8953") (eq .Comm "unbound") }}'
+ - tags: "upsd"
+ expr: '{{ and (eq .Port "3493") (eq .Comm "upsd") }}'
+ - tags: "vernemq"
+ expr: '{{ and (eq .Port "8888") (glob .Cmdline "*vernemq*") }}'
+ - tags: "zookeeper"
+ expr: '{{ and (eq .Port "2181" "2182") (glob .Cmdline "*zookeeper*") }}'
+
+compose:
+ - name: "Applications"
+ selector: "app"
+ config:
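+ # Each template below renders a go.d job config for a matched target;
+ # {{.Address}} and {{.Port}} are filled in from the discovered listener.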
+ - selector: "activemq"
+ template: |
+ module: activemq
+ name: local
+ url: http://{{.Address}}:{{.Port}}
+ - selector: "apache"
+ template: |
+ module: apache
+ name: local
+ url: http://{{.Address}}:{{.Port}}/server-status?auto
+ - selector: "nginx"
+ template: |
+ module: nginx
+ name: local
+ url: http://{{.Address}}:{{.Port}}/basic_status
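+ # ntpd is queried over its own protocol rather than HTTP, so the job
+ # below uses a fixed local address instead of the templated URL form
+ # used by the entries above.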
+ - selector: "ntpd"
+ template: |
+ module: ntpd
+ name: local
+ address: '127.0.0.1:123'
+ collect_peers: no
diff --git a/config/go.d/snmp.conf b/config/go.d/snmp.conf
index dc4da60f6..c29674956 100644
--- a/config/go.d/snmp.conf
+++ b/config/go.d/snmp.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/snmp
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: switch
# update_every: 10
diff --git a/config/go.d/solr.conf b/config/go.d/solr.conf
deleted file mode 100644
index c0cc7d095..000000000
--- a/config/go.d/solr.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-## All available configuration options, their descriptions and default values:
-## https://github.com/netdata/go.d.plugin/tree/master/modules/solr
-
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:8983
-
- - name: local
- url: http://127.0.0.1:8983
diff --git a/config/go.d/springboot2.conf b/config/go.d/springboot2.conf
deleted file mode 100644
index 6328bcc57..000000000
--- a/config/go.d/springboot2.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-## All available configuration options, their descriptions and default values:
-## https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2
-
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost:8080/actuator/prometheus
-
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
diff --git a/config/go.d/supervisord.conf b/config/go.d/supervisord.conf
index ef5e929fe..c48aee564 100644
--- a/config/go.d/supervisord.conf
+++ b/config/go.d/supervisord.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/supervisord
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: 'http://127.0.0.1:9001/RPC2'
-
- - name: local
- url: 'unix:///run/supervisor.sock'
+#jobs:
+# - name: local
+# url: 'http://127.0.0.1:9001/RPC2'
diff --git a/config/go.d/systemdunits.conf b/config/go.d/systemdunits.conf
index 36507fd05..92fa0420d 100644
--- a/config/go.d/systemdunits.conf
+++ b/config/go.d/systemdunits.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits
-#update_every: 10
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: service-units
include:
diff --git a/config/go.d/tengine.conf b/config/go.d/tengine.conf
index 33bbdd6b6..96ddab4e9 100644
--- a/config/go.d/tengine.conf
+++ b/config/go.d/tengine.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/tengine
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://localhost/us
-
- - name: local
- url: http://127.0.0.1/us
+#jobs:
+# - name: local
+# url: http://localhost/us
diff --git a/config/go.d/traefik.conf b/config/go.d/traefik.conf
index f0be8baf7..a2c69f523 100644
--- a/config/go.d/traefik.conf
+++ b/config/go.d/traefik.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/traefik
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8082/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8082/metrics
diff --git a/config/go.d/unbound.conf b/config/go.d/unbound.conf
index ac3cd4042..156e3e995 100644
--- a/config/go.d/unbound.conf
+++ b/config/go.d/unbound.conf
@@ -1,17 +1,13 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/unbound
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: 127.0.0.1:8953
- timeout: 1
- conf_path: /etc/unbound/unbound.conf
- cumulative_stats: no
- use_tls: yes
- tls_skip_verify: yes
- tls_cert: /etc/unbound/unbound_control.pem
- tls_key: /etc/unbound/unbound_control.key
+#jobs:
+# - name: local
+# address: 127.0.0.1:8953
+# timeout: 1
+# conf_path: /etc/unbound/unbound.conf
+# cumulative_stats: no
+# use_tls: yes
+# tls_skip_verify: yes
+# tls_cert: /etc/unbound/unbound_control.pem
+# tls_key: /etc/unbound/unbound_control.key
diff --git a/config/go.d/upsd.conf b/config/go.d/upsd.conf
index 87a546200..a8059f5c6 100644
--- a/config/go.d/upsd.conf
+++ b/config/go.d/upsd.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/upsd
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: upsd
- address: 127.0.0.1:3493
+#jobs:
+# - name: upsd
+# address: 127.0.0.1:3493
diff --git a/config/go.d/vcsa.conf b/config/go.d/vcsa.conf
index 0a7a2e55f..0c6bc36a9 100644
--- a/config/go.d/vcsa.conf
+++ b/config/go.d/vcsa.conf
@@ -1,17 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/vcsa
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name : vcsa1
# url : https://203.0.113.0
# username : admin@vsphere.local
-# password : somepassword
-#
-# - name : vcsa2
-# url : https://203.0.113.10
-# username : admin@vsphere.local
-# password : somepassword
+# password : password
diff --git a/config/go.d/vernemq.conf b/config/go.d/vernemq.conf
index 55877f707..9432f555a 100644
--- a/config/go.d/vernemq.conf
+++ b/config/go.d/vernemq.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/vernemq
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- url: http://127.0.0.1:8888/metrics
+#jobs:
+# - name: local
+# url: http://127.0.0.1:8888/metrics
diff --git a/config/go.d/vsphere.conf b/config/go.d/vsphere.conf
index e3a6c7f1a..43fca9111 100644
--- a/config/go.d/vsphere.conf
+++ b/config/go.d/vsphere.conf
@@ -1,17 +1,12 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/vsphere
-#update_every : 20 # do not decrease the value, vmware real-time stats generated at the 20-seconds specificity.
-#autodetection_retry : 0
-#priority : 70000
-
#jobs:
# - name : vcenter1
# url : https://203.0.113.0
# username : admin@vsphere.local
-# password : somepassword
-#
+# password : password
# - name : vcenter2
# url : https://203.0.113.10
# username : admin@vsphere.local
-# password : somepassword
+# password : password
diff --git a/config/go.d/whoisquery.conf b/config/go.d/whoisquery.conf
index 47e1f0de6..25d930fbd 100644
--- a/config/go.d/whoisquery.conf
+++ b/config/go.d/whoisquery.conf
@@ -1,11 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/whoisquery
-#update_every: 60
-#autodetection_retry: 0
-#priority: 70000
-#
-
-# jobs:
-# - name: example
-# source: example.org
+#jobs:
+# - name: example
+# source: example.org
diff --git a/config/go.d/windows.conf b/config/go.d/windows.conf
index 8a394f356..8deb89178 100644
--- a/config/go.d/windows.conf
+++ b/config/go.d/windows.conf
@@ -1,19 +1,8 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/windows
-#update_every: 5
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: hostname.local
- url: http://hostname.local:9182/metrics
-
- - name: hostname.local
- url: http://127.0.0.1:9182/metrics
-
+#jobs:
# - name: win_server1
# url: http://10.0.0.1:9182/metrics
-#
# - name: win_server2
# url: http://10.0.0.2:9182/metrics
diff --git a/config/go.d/wireguard.conf b/config/go.d/wireguard.conf
index c58d846b2..54e998457 100644
--- a/config/go.d/wireguard.conf
+++ b/config/go.d/wireguard.conf
@@ -1,9 +1,5 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/wireguard
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
jobs:
- name: wireguard
diff --git a/config/go.d/x509check.conf b/config/go.d/x509check.conf
index ba9538a3d..54a1e7eb0 100644
--- a/config/go.d/x509check.conf
+++ b/config/go.d/x509check.conf
@@ -1,10 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/x509check
-#update_every: 60
-#autodetection_retry: 0
-#priority: 70000
-
#jobs:
# - name: my_site_cert
# source: https://my_site.org:443
diff --git a/config/go.d/zookeeper.conf b/config/go.d/zookeeper.conf
index 58607ecd9..ebfe851c3 100644
--- a/config/go.d/zookeeper.conf
+++ b/config/go.d/zookeeper.conf
@@ -1,13 +1,6 @@
## All available configuration options, their descriptions and default values:
## https://github.com/netdata/go.d.plugin/tree/master/modules/zookeeper
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs:
- - name: local
- address: 127.0.0.1:2181
-
- - name: local
- address: 127.0.0.1:2182
+#jobs:
+# - name: local
+# address: 127.0.0.1:2181
diff --git a/examples/simple/main.go b/examples/simple/main.go
index 9982b91fc..4f8866265 100644
--- a/examples/simple/main.go
+++ b/examples/simple/main.go
@@ -3,6 +3,7 @@
package main
import (
+ "errors"
"fmt"
"log/slog"
"math/rand"
@@ -24,9 +25,9 @@ type example struct{ module.Base }
func (example) Cleanup() {}
-func (example) Init() bool { return true }
+func (example) Init() error { return nil }
-func (example) Check() bool { return true }
+func (example) Check() error { return nil }
func (example) Charts() *module.Charts {
return &module.Charts{
@@ -40,6 +41,7 @@ func (example) Charts() *module.Charts {
},
}
}
+func (example) Configuration() any { return nil }
func (e *example) Collect() map[string]int64 {
return map[string]int64{
@@ -103,12 +105,12 @@ func main() {
)
p := agent.New(agent.Config{
- Name: name,
- ConfDir: confDir(opt.ConfDir),
- ModulesConfDir: modulesConfDir(opt.ConfDir),
- ModulesSDConfPath: opt.WatchPath,
- RunModule: opt.Module,
- MinUpdateEvery: opt.UpdateEvery,
+ Name: name,
+ ConfDir: confDir(opt.ConfDir),
+ ModulesConfDir: modulesConfDir(opt.ConfDir),
+ ModulesConfWatchPath: opt.WatchPath,
+ RunModule: opt.Module,
+ MinUpdateEvery: opt.UpdateEvery,
})
p.Run()
@@ -116,10 +118,10 @@ func main() {
func parseCLI() *cli.Option {
opt, err := cli.Parse(os.Args)
- if err != nil {
- if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
- os.Exit(0)
- }
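+ // --help is not a failure: exit 0 for it, exit 1 for any real parse error.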
+ if err != nil {
+ var flagsErr *flags.Error
+ if errors.As(err, &flagsErr) && errors.Is(flagsErr.Type, flags.ErrHelp) {
+ os.Exit(0)
+ }
os.Exit(1)
}
return opt
diff --git a/mocks/blackbox/Dockerfile b/mocks/blackbox/Dockerfile
deleted file mode 100644
index 23c5a80a0..000000000
--- a/mocks/blackbox/Dockerfile
+++ /dev/null
@@ -1,3 +0,0 @@
-FROM alpine
-
-RUN apk add --no-cache curl
\ No newline at end of file
diff --git a/mocks/conf.d/go.d.conf b/mocks/conf.d/go.d.conf
deleted file mode 100644
index 7a179bd68..000000000
--- a/mocks/conf.d/go.d.conf
+++ /dev/null
@@ -1,16 +0,0 @@
-modules:
- activemq: yes
- apache: yes
- consul: yes
- dns_query: yes
- example: no
- freeradius: yes
- httpcheck: yes
- lighttpd: yes
- mongodb: yes
- nginx: yes
- portcheck: yes
- rabbitmq: yes
- solr: yes
- springboot2: yes
- web_log: yes
diff --git a/mocks/conf.d/go.d/apache.conf b/mocks/conf.d/go.d/apache.conf
deleted file mode 100644
index a27444e17..000000000
--- a/mocks/conf.d/go.d/apache.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-jobs:
- - name: local
- url: http://localhost/server-status?auto
-
- - name: local
- url: http://httpd/server-status?auto
diff --git a/mocks/conf.d/go.d/example.conf b/mocks/conf.d/go.d/example.conf
deleted file mode 100644
index 5d6472bba..000000000
--- a/mocks/conf.d/go.d/example.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-jobs:
-- name: example
\ No newline at end of file
diff --git a/mocks/conf.d/go.d/logstash.conf b/mocks/conf.d/go.d/logstash.conf
deleted file mode 100644
index f041a9768..000000000
--- a/mocks/conf.d/go.d/logstash.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-jobs:
-- name: local
- url: http://logstash:9600
\ No newline at end of file
diff --git a/mocks/conf.d/go.d/mongodb.conf b/mocks/conf.d/go.d/mongodb.conf
deleted file mode 100644
index a998fc179..000000000
--- a/mocks/conf.d/go.d/mongodb.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-jobs:
-- name: local
- uri: "mongodb://mongo:27017"
- timeout: 10
- databases:
- includes:
- - "* *"
diff --git a/mocks/conf.d/go.d/springboot2.conf b/mocks/conf.d/go.d/springboot2.conf
deleted file mode 100644
index da3d09233..000000000
--- a/mocks/conf.d/go.d/springboot2.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-jobs:
-- name: local
- url: http://springboot2:8080/actuator/prometheus
-- name: filter
- url: http://springboot2:8080/actuator/prometheus
- uri_filter:
- excludes:
- - = /hello
\ No newline at end of file
diff --git a/mocks/conf.d/go.d/web_log.conf b/mocks/conf.d/go.d/web_log.conf
deleted file mode 100644
index e378cfe79..000000000
--- a/mocks/conf.d/go.d/web_log.conf
+++ /dev/null
@@ -1,30 +0,0 @@
-jobs:
-- name: httpd
- path: /usr/local/apache2/logs/access_log
- categories:
- - name: status
- match: ~ ^/server-status
- histogram: [1, 10, 100, 1000]
-
-- name: httpd
- path: ./mocks/tmp/access_log
- categories:
- - name: status
- match: ~ ^/server-status
- histogram: [1, 10, 100, 1000]
-
-- name: httpd2
- path: /usr/local/apache2/logs/access_log
- aggregate_response_codes: true
- categories:
- - name: status
- match: ~ ^/server-status
- histogram: [1, 10, 100, 1000]
-
-- name: httpd2
- path: ./mocks/tmp/access_log
- aggregate_response_codes: true
- categories:
- - name: status
- match: ~ ^/server-status
- histogram: [1, 10, 100, 1000]
\ No newline at end of file
diff --git a/mocks/httpd/httpd.conf b/mocks/httpd/httpd.conf
deleted file mode 100644
index c911bc1df..000000000
--- a/mocks/httpd/httpd.conf
+++ /dev/null
@@ -1,92 +0,0 @@
-ServerRoot "/usr/local/apache2"
-
-Listen 80
-LoadModule mpm_event_module modules/mod_mpm_event.so
-LoadModule authn_file_module modules/mod_authn_file.so
-LoadModule authn_core_module modules/mod_authn_core.so
-LoadModule authz_host_module modules/mod_authz_host.so
-LoadModule authz_groupfile_module modules/mod_authz_groupfile.so
-LoadModule authz_user_module modules/mod_authz_user.so
-LoadModule authz_core_module modules/mod_authz_core.so
-LoadModule access_compat_module modules/mod_access_compat.so
-LoadModule auth_basic_module modules/mod_auth_basic.so
-LoadModule reqtimeout_module modules/mod_reqtimeout.so
-LoadModule filter_module modules/mod_filter.so
-LoadModule mime_module modules/mod_mime.so
-LoadModule log_config_module modules/mod_log_config.so
-LoadModule logio_module modules/mod_logio.so
-LoadModule env_module modules/mod_env.so
-LoadModule headers_module modules/mod_headers.so
-LoadModule setenvif_module modules/mod_setenvif.so
-LoadModule version_module modules/mod_version.so
-LoadModule unixd_module modules/mod_unixd.so
-LoadModule status_module modules/mod_status.so
-LoadModule autoindex_module modules/mod_autoindex.so
-LoadModule dir_module modules/mod_dir.so
-LoadModule alias_module modules/mod_alias.so
-
-User daemon
-Group daemon
-
-ServerAdmin you@example.com
-
-
- AllowOverride none
- Require all denied
-
-
-DocumentRoot "/usr/local/apache2/htdocs"
-
- Options Indexes FollowSymLinks
- AllowOverride None
- Require all granted
-
-
-
- DirectoryIndex index.html
-
-
-
- Require all denied
-
-
-ErrorLog /usr/local/apache2/logs/error_log
-LogLevel warn
-
-
- LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
- LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %{cookie}n %D %{Host}i \"%{X-Forwarded-For}i\"" onearm
- LogFormat "%h %l %u %t \"%r\" %>s %b" common
-
-
- LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\" %I %O" combinedio
-
-
- CustomLog /usr/local/apache2/logs/access_log combinedio
-
-
-
-
- ScriptAlias /cgi-bin/ "/usr/local/apache2/cgi-bin/"
-
-
-
-
- AllowOverride None
- Options None
- Require all granted
-
-
-
- RequestHeader unset Proxy early
-
-
-
- TypesConfig conf/mime.types
- AddType application/x-compress .Z
- AddType application/x-gzip .gz .tgz
-
-
-
- SetHandler server-status
-
diff --git a/mocks/netdata/netdata.conf b/mocks/netdata/netdata.conf
deleted file mode 100644
index 9f1692572..000000000
--- a/mocks/netdata/netdata.conf
+++ /dev/null
@@ -1,12 +0,0 @@
-[plugins]
- proc = no
- diskspace = no
- cgroups = no
- tc = no
- idlejitter = no
- apps = no
- python.d = no
- charts.d = no
- node.d = no
- fping = no
- go.d = yes
\ No newline at end of file
diff --git a/mocks/springboot2/.gitignore b/mocks/springboot2/.gitignore
deleted file mode 100644
index 836ff4a65..000000000
--- a/mocks/springboot2/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-.gradle/
-.idea/
-springboot2.*
-/build
\ No newline at end of file
diff --git a/mocks/springboot2/Dockerfile b/mocks/springboot2/Dockerfile
deleted file mode 100644
index c7471dff8..000000000
--- a/mocks/springboot2/Dockerfile
+++ /dev/null
@@ -1,12 +0,0 @@
-FROM gradle:5.0-jdk8-alpine as builder
-
-COPY --chown=gradle:gradle . /home/gradle/src
-WORKDIR /home/gradle/src
-RUN gradle build
-
-FROM openjdk:8-jre-alpine
-
-EXPOSE 8080
-COPY --from=builder /home/gradle/src/build/libs/springboot2-0.1.0.jar /app/
-
-CMD ["java", "-jar", "/app/springboot2-0.1.0.jar"]
\ No newline at end of file
diff --git a/mocks/springboot2/build.gradle b/mocks/springboot2/build.gradle
deleted file mode 100644
index 3cbcff14a..000000000
--- a/mocks/springboot2/build.gradle
+++ /dev/null
@@ -1,33 +0,0 @@
-buildscript {
- repositories {
- mavenCentral()
- }
- dependencies {
- classpath("org.springframework.boot:spring-boot-gradle-plugin:2.0.3.RELEASE")
- }
-}
-
-apply plugin: 'java'
-apply plugin: 'eclipse'
-apply plugin: 'idea'
-apply plugin: 'org.springframework.boot'
-apply plugin: 'io.spring.dependency-management'
-
-bootJar {
- baseName = 'springboot2'
- version = '0.1.0'
-}
-
-repositories {
- mavenCentral()
-}
-
-sourceCompatibility = 1.8
-targetCompatibility = 1.8
-
-dependencies {
- compile("org.springframework.boot:spring-boot-starter-web")
- compile("org.springframework.boot:spring-boot-starter-actuator")
- compile("io.micrometer:micrometer-registry-prometheus")
- testCompile('org.springframework.boot:spring-boot-starter-test')
-}
\ No newline at end of file
diff --git a/mocks/springboot2/settings.gradle b/mocks/springboot2/settings.gradle
deleted file mode 100644
index 7fca39b72..000000000
--- a/mocks/springboot2/settings.gradle
+++ /dev/null
@@ -1,2 +0,0 @@
-rootProject.name = 'springboot2'
-
diff --git a/mocks/springboot2/src/main/java/hello/Main.java b/mocks/springboot2/src/main/java/hello/Main.java
deleted file mode 100644
index 524656de5..000000000
--- a/mocks/springboot2/src/main/java/hello/Main.java
+++ /dev/null
@@ -1,23 +0,0 @@
-package hello;
-
-import org.springframework.boot.SpringApplication;
-import org.springframework.boot.autoconfigure.EnableAutoConfiguration;
-import org.springframework.boot.autoconfigure.SpringBootApplication;
-import org.springframework.stereotype.Controller;
-import org.springframework.web.bind.annotation.RequestMapping;
-import org.springframework.web.bind.annotation.ResponseBody;
-
-@Controller
-@SpringBootApplication
-@EnableAutoConfiguration
-public class Main {
- public static void main(String[] args) {
- SpringApplication.run(Main.class, args);
- }
-
- @RequestMapping("/hello")
- @ResponseBody
- public String hello() {
- return "Hello!";
- }
-}
diff --git a/mocks/springboot2/src/main/resources/application.properties b/mocks/springboot2/src/main/resources/application.properties
deleted file mode 100644
index 821da0927..000000000
--- a/mocks/springboot2/src/main/resources/application.properties
+++ /dev/null
@@ -1 +0,0 @@
-management.endpoints.web.exposure.include=*
diff --git a/mocks/tmp/.gitkeep b/mocks/tmp/.gitkeep
deleted file mode 100644
index e69de29bb..000000000
diff --git a/modules/activemq/activemq.go b/modules/activemq/activemq.go
index 109c874de..19a3183d1 100644
--- a/modules/activemq/activemq.go
+++ b/modules/activemq/activemq.go
@@ -4,14 +4,12 @@ package activemq
import (
_ "embed"
- "fmt"
- "strings"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,290 +22,116 @@ func init() {
})
}
-const (
- keyQueues = "queues"
- keyTopics = "topics"
- keyAdvisory = "Advisory"
-)
-
-var nameReplacer = strings.NewReplacer(".", "_", " ", "")
-
-const (
- defaultMaxQueues = 50
- defaultMaxTopics = 50
- defaultURL = "http://127.0.0.1:8161"
- defaultHTTPTimeout = time.Second
-)
-
-// New creates Example with default values.
func New() *ActiveMQ {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &ActiveMQ{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8161",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
+ Webadmin: "admin",
+ MaxQueues: 50,
+ MaxTopics: 50,
},
-
- MaxQueues: defaultMaxQueues,
- MaxTopics: defaultMaxTopics,
- }
-
- return &ActiveMQ{
- Config: config,
charts: &Charts{},
activeQueues: make(map[string]bool),
activeTopics: make(map[string]bool),
}
}
-// Config is the ActiveMQ module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
- Webadmin string `yaml:"webadmin"`
- MaxQueues int `yaml:"max_queues"`
- MaxTopics int `yaml:"max_topics"`
- QueuesFilter string `yaml:"queues_filter"`
- TopicsFilter string `yaml:"topics_filter"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Webadmin string `yaml:"webadmin" json:"webadmin"`
+ MaxQueues int `yaml:"max_queues" json:"max_queues"`
+ MaxTopics int `yaml:"max_topics" json:"max_topics"`
+ QueuesFilter string `yaml:"queues_filter" json:"queues_filter"`
+ TopicsFilter string `yaml:"topics_filter" json:"topics_filter"`
}
-// ActiveMQ ActiveMQ module.
type ActiveMQ struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ apiClient *apiClient
- apiClient *apiClient
activeQueues map[string]bool
activeTopics map[string]bool
queuesFilter matcher.Matcher
topicsFilter matcher.Matcher
- charts *Charts
}
-// Cleanup makes cleanup.
-func (ActiveMQ) Cleanup() {}
-
-// Init makes initialization.
-func (a *ActiveMQ) Init() bool {
- if a.URL == "" {
- a.Error("URL not set")
- return false
- }
+func (a *ActiveMQ) Configuration() any {
+ return a.Config
+}
- if a.Webadmin == "" {
- a.Error("webadmin root path is not set")
- return false
+func (a *ActiveMQ) Init() error {
+ if err := a.validateConfig(); err != nil {
+ a.Errorf("config validation: %v", err)
+ return err
}
- if a.QueuesFilter != "" {
- f, err := matcher.NewSimplePatternsMatcher(a.QueuesFilter)
- if err != nil {
- a.Errorf("error on creating queues filter : %v", err)
- return false
- }
- a.queuesFilter = matcher.WithCache(f)
+ qf, err := a.initQueuesFiler()
+ if err != nil {
+ a.Error(err)
+ return err
}
+ a.queuesFilter = qf
- if a.TopicsFilter != "" {
- f, err := matcher.NewSimplePatternsMatcher(a.TopicsFilter)
- if err != nil {
- a.Errorf("error on creating topics filter : %v", err)
- return false
- }
- a.topicsFilter = matcher.WithCache(f)
+ tf, err := a.initTopicsFilter()
+ if err != nil {
+ a.Error(err)
+ return err
}
+ a.topicsFilter = tf
client, err := web.NewHTTPClient(a.Client)
if err != nil {
a.Error(err)
- return false
+ return err
}
a.apiClient = newAPIClient(client, a.Request, a.Webadmin)
- return true
-}
-
-// Check makes check.
-func (a *ActiveMQ) Check() bool {
- return len(a.Collect()) > 0
+ return nil
}
-// Charts creates Charts.
-func (a ActiveMQ) Charts() *Charts {
- return a.charts
-}
-
-// Collect collects metrics.
-func (a *ActiveMQ) Collect() map[string]int64 {
- metrics := make(map[string]int64)
-
- var (
- queues *queues
- topics *topics
- err error
- )
-
- if queues, err = a.apiClient.getQueues(); err != nil {
+func (a *ActiveMQ) Check() error {
+ mx, err := a.collect()
+ if err != nil {
a.Error(err)
- return nil
+ return err
}
- if topics, err = a.apiClient.getTopics(); err != nil {
- a.Error(err)
- return nil
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
-
- a.processQueues(queues, metrics)
- a.processTopics(topics, metrics)
-
- return metrics
+ return nil
}
-func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) {
- var (
- count = len(a.activeQueues)
- updated = make(map[string]bool)
- unp int
- )
-
- for _, q := range queues.Items {
- if strings.Contains(q.Name, keyAdvisory) {
- continue
- }
-
- if !a.activeQueues[q.Name] {
- if a.MaxQueues != 0 && count > a.MaxQueues {
- unp++
- continue
- }
-
- if !a.filterQueues(q.Name) {
- continue
- }
-
- a.activeQueues[q.Name] = true
- a.addQueueTopicCharts(q.Name, keyQueues)
- }
-
- rname := nameReplacer.Replace(q.Name)
-
- metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount
- metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount
- metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount
- metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount
-
- updated[q.Name] = true
- }
-
- for name := range a.activeQueues {
- if !updated[name] {
- delete(a.activeQueues, name)
- a.removeQueueTopicCharts(name, keyQueues)
- }
- }
-
- if unp > 0 {
- a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues)
- }
-}
-
-func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) {
- var (
- count = len(a.activeTopics)
- updated = make(map[string]bool)
- unp int
- )
-
- for _, t := range topics.Items {
- if strings.Contains(t.Name, keyAdvisory) {
- continue
- }
-
- if !a.activeTopics[t.Name] {
- if a.MaxTopics != 0 && count > a.MaxTopics {
- unp++
- continue
- }
-
- if !a.filterTopics(t.Name) {
- continue
- }
-
- a.activeTopics[t.Name] = true
- a.addQueueTopicCharts(t.Name, keyTopics)
- }
-
- rname := nameReplacer.Replace(t.Name)
-
- metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount
- metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount
- metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount
- metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount
-
- updated[t.Name] = true
- }
-
- for name := range a.activeTopics {
- if !updated[name] {
- // TODO: delete after timeout?
- delete(a.activeTopics, name)
- a.removeQueueTopicCharts(name, keyTopics)
- }
- }
-
- if unp > 0 {
- a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics)
- }
-}
-
-func (a ActiveMQ) filterQueues(line string) bool {
- if a.queuesFilter == nil {
- return true
- }
- return a.queuesFilter.MatchString(line)
+func (a *ActiveMQ) Charts() *Charts {
+ return a.charts
}
-func (a ActiveMQ) filterTopics(line string) bool {
- if a.topicsFilter == nil {
- return true
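+// Cleanup closes the API client's idle HTTP connections when the job stops.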
+func (a *ActiveMQ) Cleanup() {
+ if a.apiClient != nil && a.apiClient.httpClient != nil {
+ a.apiClient.httpClient.CloseIdleConnections()
}
- return a.topicsFilter.MatchString(line)
}
-func (a *ActiveMQ) addQueueTopicCharts(name, typ string) {
- rname := nameReplacer.Replace(name)
-
- charts := charts.Copy()
-
- for _, chart := range *charts {
- chart.ID = fmt.Sprintf(chart.ID, typ, rname)
- chart.Title = fmt.Sprintf(chart.Title, name)
- chart.Fam = typ
- for _, dim := range chart.Dims {
- dim.ID = fmt.Sprintf(dim.ID, typ, rname)
- }
+func (a *ActiveMQ) Collect() map[string]int64 {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return nil
}
- _ = a.charts.Add(*charts...)
-
-}
-
-func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) {
- rname := nameReplacer.Replace(name)
-
- chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname))
- chart.MarkRemove()
- chart.MarkNotCreated()
-
- chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname))
- chart.MarkRemove()
- chart.MarkNotCreated()
-
- chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname))
- chart.MarkRemove()
- chart.MarkNotCreated()
+ return mx
}
diff --git a/modules/activemq/activemq_test.go b/modules/activemq/activemq_test.go
index e45ceecd4..0ee2f173f 100644
--- a/modules/activemq/activemq_test.go
+++ b/modules/activemq/activemq_test.go
@@ -5,15 +5,35 @@ package activemq
import (
"net/http"
"net/http/httptest"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
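+// Verifies that Config round-trips through both JSON and YAML via the
+// shared module test helper.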
+func TestActiveMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ActiveMQ{}, dataConfigJSON, dataConfigYAML)
+}
+
var (
queuesData = []string{
`
@@ -131,25 +151,15 @@ var (
}
)
-func TestNew(t *testing.T) {
- job := New()
-
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration)
- assert.Equal(t, defaultMaxQueues, job.MaxQueues)
- assert.Equal(t, defaultMaxTopics, job.MaxTopics)
-}
-
func TestActiveMQ_Init(t *testing.T) {
job := New()
// NG case
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
// OK case
job.Webadmin = "webadmin"
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.apiClient)
}
@@ -170,8 +180,8 @@ func TestActiveMQ_Check(t *testing.T) {
job.HTTP.Request = web.Request{URL: ts.URL}
job.Webadmin = "webadmin"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
}
func TestActiveMQ_Charts(t *testing.T) {
@@ -203,8 +213,8 @@ func TestActiveMQ_Collect(t *testing.T) {
job.HTTP.Request = web.Request{URL: ts.URL}
job.Webadmin = "webadmin"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
cases := []struct {
expected map[string]int64
@@ -310,8 +320,8 @@ func TestActiveMQ_404(t *testing.T) {
job.Webadmin = "webadmin"
job.HTTP.Request = web.Request{URL: ts.URL}
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestActiveMQ_InvalidData(t *testing.T) {
@@ -324,6 +334,6 @@ func TestActiveMQ_InvalidData(t *testing.T) {
mod.Webadmin = "webadmin"
mod.HTTP.Request = web.Request{URL: ts.URL}
- require.True(t, mod.Init())
- assert.False(t, mod.Check())
+ require.NoError(t, mod.Init())
+ assert.Error(t, mod.Check())
}
diff --git a/modules/activemq/apiclient.go b/modules/activemq/apiclient.go
index 6835fd5aa..0be94fe70 100644
--- a/modules/activemq/apiclient.go
+++ b/modules/activemq/apiclient.go
@@ -5,11 +5,12 @@ package activemq
import (
"encoding/xml"
"fmt"
- "github.com/netdata/go.d.plugin/pkg/web"
"io"
"net/http"
"net/url"
"path"
+
+ "github.com/netdata/go.d.plugin/pkg/web"
)
type topics struct {
@@ -104,7 +105,7 @@ func (a *apiClient) getTopics() (*topics, error) {
return &topics, nil
}
-func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
+func (a *apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
resp, err := a.httpClient.Do(req)
if err != nil {
return resp, fmt.Errorf("error on request to %s : %v", req.URL, err)
@@ -117,7 +118,7 @@ func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) {
return resp, err
}
-func (a apiClient) createRequest(urlPath string) (*http.Request, error) {
+func (a *apiClient) createRequest(urlPath string) (*http.Request, error) {
req := a.request.Copy()
u, err := url.Parse(req.URL)
if err != nil {
diff --git a/modules/activemq/collect.go b/modules/activemq/collect.go
new file mode 100644
index 000000000..0dbaf5544
--- /dev/null
+++ b/modules/activemq/collect.go
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ "fmt"
+ "strings"
+)
+
+const (
+ keyQueues = "queues"
+ keyTopics = "topics"
+ keyAdvisory = "Advisory"
+)
+
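+// nameReplacer sanitizes destination names for use in metric keys and chart IDs:
+// dots become underscores and spaces are removed (e.g. "orders.EU queue" -> "orders_EUqueue").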
+var nameReplacer = strings.NewReplacer(".", "_", " ", "")
+
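+// collect fetches queue and topic stats via the web console API client and
+// flattens them into a metric key/value map.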
+func (a *ActiveMQ) collect() (map[string]int64, error) {
+ metrics := make(map[string]int64)
+
+ var (
+ queues *queues
+ topics *topics
+ err error
+ )
+
+ if queues, err = a.apiClient.getQueues(); err != nil {
+ return nil, err
+ }
+
+ if topics, err = a.apiClient.getTopics(); err != nil {
+ return nil, err
+ }
+
+ a.processQueues(queues, metrics)
+ a.processTopics(topics, metrics)
+
+ return metrics, nil
+}
+
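+// processQueues updates per-queue metrics, skipping advisory destinations.
+// Newly seen queues get charts added (subject to the max_queues limit and the
+// queues filter); queues that are no longer reported have their charts removed.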
+func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) {
+ var (
+ count = len(a.activeQueues)
+ updated = make(map[string]bool)
+ unp int
+ )
+
+ for _, q := range queues.Items {
+ if strings.Contains(q.Name, keyAdvisory) {
+ continue
+ }
+
+ if !a.activeQueues[q.Name] {
+ if a.MaxQueues != 0 && count > a.MaxQueues {
+ unp++
+ continue
+ }
+
+ if !a.filterQueues(q.Name) {
+ continue
+ }
+
+ a.activeQueues[q.Name] = true
+ a.addQueueTopicCharts(q.Name, keyQueues)
+ }
+
+ rname := nameReplacer.Replace(q.Name)
+
+ metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount
+ metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount
+ metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount
+ metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount
+
+ updated[q.Name] = true
+ }
+
+ for name := range a.activeQueues {
+ if !updated[name] {
+ delete(a.activeQueues, name)
+ a.removeQueueTopicCharts(name, keyQueues)
+ }
+ }
+
+ if unp > 0 {
+ a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues)
+ }
+}
+
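+// processTopics mirrors processQueues for topics.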
+func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) {
+ var (
+ count = len(a.activeTopics)
+ updated = make(map[string]bool)
+ unp int
+ )
+
+ for _, t := range topics.Items {
+ if strings.Contains(t.Name, keyAdvisory) {
+ continue
+ }
+
+ if !a.activeTopics[t.Name] {
+ if a.MaxTopics != 0 && count > a.MaxTopics {
+ unp++
+ continue
+ }
+
+ if !a.filterTopics(t.Name) {
+ continue
+ }
+
+ a.activeTopics[t.Name] = true
+ a.addQueueTopicCharts(t.Name, keyTopics)
+ }
+
+ rname := nameReplacer.Replace(t.Name)
+
+ metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount
+ metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount
+ metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount
+ metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount
+
+ updated[t.Name] = true
+ }
+
+ for name := range a.activeTopics {
+ if !updated[name] {
+ // TODO: delete after timeout?
+ delete(a.activeTopics, name)
+ a.removeQueueTopicCharts(name, keyTopics)
+ }
+ }
+
+ if unp > 0 {
+ a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics)
+ }
+}
+
+func (a *ActiveMQ) filterQueues(line string) bool {
+ if a.queuesFilter == nil {
+ return true
+ }
+ return a.queuesFilter.MatchString(line)
+}
+
+func (a *ActiveMQ) filterTopics(line string) bool {
+ if a.topicsFilter == nil {
+ return true
+ }
+ return a.topicsFilter.MatchString(line)
+}
+
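+// addQueueTopicCharts instantiates the chart templates for a single queue or
+// topic; the %s placeholders in chart and dim IDs are filled with the type
+// ("queues"/"topics") and the sanitized destination name.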
+func (a *ActiveMQ) addQueueTopicCharts(name, typ string) {
+ rname := nameReplacer.Replace(name)
+
+ charts := charts.Copy()
+
+ for _, chart := range *charts {
+ chart.ID = fmt.Sprintf(chart.ID, typ, rname)
+ chart.Title = fmt.Sprintf(chart.Title, name)
+ chart.Fam = typ
+
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, typ, rname)
+ }
+ }
+
+	_ = a.charts.Add(*charts...)
+}
+
+func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) {
+ rname := nameReplacer.Replace(name)
+
+ chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+
+ chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+
+ chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname))
+ chart.MarkRemove()
+ chart.MarkNotCreated()
+}
diff --git a/modules/activemq/config_schema.json b/modules/activemq/config_schema.json
index abefb5d2f..c8a84df0e 100644
--- a/modules/activemq/config_schema.json
+++ b/modules/activemq/config_schema.json
@@ -1,75 +1,197 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/activemq job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ActiveMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the ActiveMQ Web Console API.",
+ "type": "string",
+ "default": "http://127.0.0.1:8161"
+ },
+ "webadmin": {
+ "title": "Webadmin path",
+ "description": "Webadmin root path.",
+ "type": "string",
+ "default": "admin"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "max_queues": {
+ "title": "Queue limit",
+ "description": "The maximum number of concurrently collected queues.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "queues_filter": {
+ "title": "Queue selector",
+ "description": "Queues matching the selector will be monitored. Patterns follow the syntax of Netdata simple patterns.",
+ "type": "string",
+        "minLength": 1,
+ "default": "*"
+ },
+ "max_topics": {
+ "title": "Topic limit",
+        "description": "The maximum number of concurrently collected topics.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "topics_filter": {
+ "title": "Topic selector",
+ "description": "Topics matching the selector will be monitored. Patterns follow the syntax of Netdata simple patterns.",
+ "type": "string",
+        "minLength": 1,
+ "default": "*"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url",
+ "webadmin"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "webadmin",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Filtering",
+ "fields": [
+ "max_queues",
+ "queues_filter",
+ "max_topics",
+ "topics_filter"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "webadmin": {
- "type": "string"
- },
- "max_queues": {
- "type": "integer"
- },
- "max_topics": {
- "type": "integer"
- },
- "queues_filter": {
- "type": "string"
- },
- "topics_filter": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "tls_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url",
- "webadmin"
- ]
+ }
}
diff --git a/modules/activemq/init.go b/modules/activemq/init.go
new file mode 100644
index 000000000..920f0dd62
--- /dev/null
+++ b/modules/activemq/init.go
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package activemq
+
+import (
+ "errors"
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+)
+
+func (a *ActiveMQ) validateConfig() error {
+ if a.URL == "" {
+ return errors.New("url not set")
+ }
+ if a.Webadmin == "" {
+		return errors.New("webadmin root path not set")
+ }
+ return nil
+}
+
+func (a *ActiveMQ) initQueuesFiler() (matcher.Matcher, error) {
+ if a.QueuesFilter == "" {
+ return matcher.TRUE(), nil
+ }
+ return matcher.NewSimplePatternsMatcher(a.QueuesFilter)
+}
+
+func (a *ActiveMQ) initTopicsFilter() (matcher.Matcher, error) {
+ if a.TopicsFilter == "" {
+ return matcher.TRUE(), nil
+ }
+ return matcher.NewSimplePatternsMatcher(a.TopicsFilter)
+}
diff --git a/modules/activemq/testdata/config.json b/modules/activemq/testdata/config.json
new file mode 100644
index 000000000..13327dd3f
--- /dev/null
+++ b/modules/activemq/testdata/config.json
@@ -0,0 +1,25 @@
+{
+ "update_every": 123,
+ "webadmin": "ok",
+ "max_queues": 123,
+ "max_topics": 123,
+ "queues_filter": "ok",
+ "topics_filter": "ok",
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/activemq/testdata/config.yaml b/modules/activemq/testdata/config.yaml
new file mode 100644
index 000000000..2b86db3fd
--- /dev/null
+++ b/modules/activemq/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+webadmin: "ok"
+max_queues: 123
+max_topics: 123
+queues_filter: "ok"
+topics_filter: "ok"
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
\ No newline at end of file
diff --git a/modules/apache/apache.go b/modules/apache/apache.go
index 8b117463d..f6768befd 100644
--- a/modules/apache/apache.go
+++ b/modules/apache/apache.go
@@ -4,6 +4,7 @@ package apache
import (
_ "embed"
+ "errors"
"net/http"
"sync"
"time"
@@ -30,7 +31,7 @@ func New() *Apache {
URL: "http://127.0.0.1/server-status?auto",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -40,40 +41,55 @@ func New() *Apache {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Apache struct {
module.Base
-
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
httpClient *http.Client
- once *sync.Once
+
+ once *sync.Once
+}
+
+func (a *Apache) Configuration() any {
+ return a.Config
}
-func (a *Apache) Init() bool {
- if err := a.verifyConfig(); err != nil {
+func (a *Apache) Init() error {
+ if err := a.validateConfig(); err != nil {
a.Errorf("config validation: %v", err)
- return false
+ return err
}
httpClient, err := a.initHTTPClient()
if err != nil {
a.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
a.httpClient = httpClient
a.Debugf("using URL %s", a.URL)
- a.Debugf("using timeout: %s", a.Timeout.Duration)
- return true
+ a.Debugf("using timeout: %s", a.Timeout)
+
+ return nil
}
-func (a *Apache) Check() bool {
- return len(a.Collect()) > 0
+func (a *Apache) Check() error {
+ mx, err := a.collect()
+ if err != nil {
+ a.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (a *Apache) Charts() *module.Charts {
diff --git a/modules/apache/apache_test.go b/modules/apache/apache_test.go
index a507113f3..32f7c86a0 100644
--- a/modules/apache/apache_test.go
+++ b/modules/apache/apache_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
"github.com/stretchr/testify/assert"
@@ -15,6 +16,9 @@ import (
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataSimpleStatusMPMEvent, _ = os.ReadFile("testdata/simple-status-mpm-event.txt")
dataExtendedStatusMPMEvent, _ = os.ReadFile("testdata/extended-status-mpm-event.txt")
dataExtendedStatusMPMPrefork, _ = os.ReadFile("testdata/extended-status-mpm-prefork.txt")
@@ -23,16 +27,22 @@ var (
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
"dataSimpleStatusMPMEvent": dataSimpleStatusMPMEvent,
"dataExtendedStatusMPMEvent": dataExtendedStatusMPMEvent,
"dataExtendedStatusMPMPrefork": dataExtendedStatusMPMPrefork,
"dataLighttpdStatus": dataLighttpdStatus,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestApache_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Apache{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestApache_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -66,9 +76,9 @@ func TestApache_Init(t *testing.T) {
apache.Config = test.config
if test.wantFail {
- assert.False(t, apache.Init())
+ assert.Error(t, apache.Init())
} else {
- assert.True(t, apache.Init())
+ assert.NoError(t, apache.Init())
}
})
}
@@ -115,9 +125,9 @@ func TestApache_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, apache.Check())
+ assert.Error(t, apache.Check())
} else {
- assert.True(t, apache.Check())
+ assert.NoError(t, apache.Check())
}
})
}
@@ -255,7 +265,7 @@ func caseMPMEventSimpleStatus(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
@@ -268,7 +278,7 @@ func caseMPMEventExtendedStatus(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
@@ -281,7 +291,7 @@ func caseMPMPreforkExtendedStatus(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
@@ -294,7 +304,7 @@ func caseLighttpdResponse(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
@@ -307,7 +317,7 @@ func caseInvalidDataResponse(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
@@ -316,7 +326,7 @@ func caseConnectionRefused(t *testing.T) (*Apache, func()) {
t.Helper()
apache := New()
apache.URL = "http://127.0.0.1:65001/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, func() {}
}
@@ -329,7 +339,7 @@ func case404(t *testing.T) (*Apache, func()) {
}))
apache := New()
apache.URL = srv.URL + "/server-status?auto"
- require.True(t, apache.Init())
+ require.NoError(t, apache.Init())
return apache, srv.Close
}
diff --git a/modules/apache/config_schema.json b/modules/apache/config_schema.json
index 81ece2b67..8391d497e 100644
--- a/modules/apache/config_schema.json
+++ b/modules/apache/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/apache job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Apache collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+        "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Apache status page.",
+ "type": "string",
+ "default": "http://127.0.0.1/server-status?auto"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "url": {
- "type": "string"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
+ "ui:widget": "password"
},
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/apache/init.go b/modules/apache/init.go
index 355999770..8c4699cc1 100644
--- a/modules/apache/init.go
+++ b/modules/apache/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (a Apache) verifyConfig() error {
+func (a *Apache) validateConfig() error {
if a.URL == "" {
return errors.New("url not set")
}
@@ -20,6 +20,6 @@ func (a Apache) verifyConfig() error {
return nil
}
-func (a Apache) initHTTPClient() (*http.Client, error) {
+func (a *Apache) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(a.Client)
}
diff --git a/modules/apache/testdata/config.json b/modules/apache/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/apache/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/apache/testdata/config.yaml b/modules/apache/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/apache/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/bind/bind.go b/modules/bind/bind.go
index bcca0204e..50a0c9953 100644
--- a/modules/bind/bind.go
+++ b/modules/bind/bind.go
@@ -4,8 +4,8 @@ package bind
import (
_ "embed"
- "fmt"
- "strings"
+ "errors"
+ "net/http"
"time"
"github.com/netdata/go.d.plugin/pkg/matcher"
@@ -24,286 +24,112 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1:8653/json/v1"
- defaultHTTPTimeout = time.Second * 2
-)
-
-// New creates Bind with default values.
func New() *Bind {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &Bind{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8653/json/v1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
-
- return &Bind{
- Config: config,
charts: &Charts{},
}
}
-type bindAPIClient interface {
- serverStats() (*serverStats, error)
-}
-
-// Config is the Bind module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
- PermitView string `yaml:"permit_view"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ PermitView string `yaml:"permit_view" json:"permit_view"`
}
-// Bind Bind module.
-type Bind struct {
- module.Base
- Config `yaml:",inline"`
+type (
+ Bind struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- bindAPIClient
- permitView matcher.Matcher
- charts *Charts
-}
+ charts *Charts
-// Cleanup makes cleanup.
-func (Bind) Cleanup() {}
+ httpClient *http.Client
+ bindAPIClient
+
+ permitView matcher.Matcher
+ }
-// Init makes initialization.
-func (b *Bind) Init() bool {
- if b.URL == "" {
- b.Error("URL not set")
- return false
+ bindAPIClient interface {
+ serverStats() (*serverStats, error)
}
+)
+
+func (b *Bind) Configuration() any {
+ return b.Config
+}
- client, err := web.NewHTTPClient(b.Client)
+func (b *Bind) Init() error {
+ if err := b.validateConfig(); err != nil {
+		b.Errorf("config validation: %v", err)
+ return err
+ }
+
+ pvm, err := b.initPermitViewMatcher()
if err != nil {
- b.Errorf("error on creating http client : %v", err)
- return false
+ b.Error(err)
+ return err
+ }
+ if pvm != nil {
+ b.permitView = pvm
}
- switch {
- case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+
- b.bindAPIClient = newXML3Client(client, b.Request)
- case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+
- b.bindAPIClient = newJSONClient(client, b.Request)
- default:
- b.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL)
- return false
+ httpClient, err := web.NewHTTPClient(b.Client)
+ if err != nil {
+		b.Errorf("creating http client: %v", err)
+ return err
}
+ b.httpClient = httpClient
- if b.PermitView != "" {
- m, err := matcher.NewSimplePatternsMatcher(b.PermitView)
- if err != nil {
- b.Errorf("error on creating permitView matcher : %v", err)
- return false
- }
- b.permitView = matcher.WithCache(m)
+ bindClient, err := b.initBindApiClient(httpClient)
+ if err != nil {
+ b.Error(err)
+ return err
}
+ b.bindAPIClient = bindClient
- return true
+ return nil
}
-// Check makes check.
-func (b *Bind) Check() bool {
- return len(b.Collect()) > 0
+func (b *Bind) Check() error {
+ mx, err := b.collect()
+ if err != nil {
+ b.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
-// Charts creates Charts.
-func (b Bind) Charts() *Charts {
+func (b *Bind) Charts() *Charts {
return b.charts
}
-// Collect collects metrics.
func (b *Bind) Collect() map[string]int64 {
- metrics := make(map[string]int64)
+ mx, err := b.collect()
- s, err := b.serverStats()
if err != nil {
b.Error(err)
return nil
}
- b.collectServerStats(metrics, s)
- return metrics
+ return mx
}
-func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) {
- var chart *Chart
-
- for k, v := range stats.NSStats {
- var (
- algo = module.Incremental
- dimName = k
- chartID string
- )
- switch {
- default:
- continue
- case k == "RecursClients":
- dimName = "clients"
- chartID = keyRecursiveClients
- algo = module.Absolute
- case k == "Requestv4":
- dimName = "IPv4"
- chartID = keyReceivedRequests
- case k == "Requestv6":
- dimName = "IPv6"
- chartID = keyReceivedRequests
- case k == "QryFailure":
- dimName = "failures"
- chartID = keyQueryFailures
- case k == "QryUDP":
- dimName = "UDP"
- chartID = keyProtocolsQueries
- case k == "QryTCP":
- dimName = "TCP"
- chartID = keyProtocolsQueries
- case k == "QrySuccess":
- dimName = "queries"
- chartID = keyQueriesSuccess
- case strings.HasSuffix(k, "QryRej"):
- chartID = keyQueryFailuresDetail
- case strings.HasPrefix(k, "Qry"):
- chartID = keyQueriesAnalysis
- case strings.HasPrefix(k, "Update"):
- chartID = keyReceivedUpdates
- }
-
- if !b.charts.Has(chartID) {
- _ = b.charts.Add(charts[chartID].Copy())
- }
-
- chart = b.charts.Get(chartID)
-
- if !chart.HasDim(k) {
- _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo})
- chart.MarkNotCreated()
- }
-
- delete(stats.NSStats, k)
- metrics[k] = v
- }
-
- for _, v := range []struct {
- item map[string]int64
- chartID string
- }{
- {item: stats.NSStats, chartID: keyNSStats},
- {item: stats.OpCodes, chartID: keyInOpCodes},
- {item: stats.QTypes, chartID: keyInQTypes},
- {item: stats.SockStats, chartID: keyInSockStats},
- } {
- if len(v.item) == 0 {
- continue
- }
-
- if !b.charts.Has(v.chartID) {
- _ = b.charts.Add(charts[v.chartID].Copy())
- }
-
- chart = b.charts.Get(v.chartID)
-
- for key, val := range v.item {
- if !chart.HasDim(key) {
- _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental})
- chart.MarkNotCreated()
- }
-
- metrics[key] = val
- }
- }
-
- if !(b.permitView != nil && len(stats.Views) > 0) {
- return
- }
-
- for name, view := range stats.Views {
- if !b.permitView.MatchString(name) {
- continue
- }
- r := view.Resolver
-
- delete(r.Stats, "BucketSize")
-
- for key, val := range r.Stats {
- var (
- algo = module.Incremental
- dimName = key
- chartKey string
- )
-
- switch {
- default:
- chartKey = keyResolverStats
- case key == "NumFetch":
- chartKey = keyResolverNumFetch
- dimName = "queries"
- algo = module.Absolute
- case strings.HasPrefix(key, "QryRTT"):
- // TODO: not ordered
- chartKey = keyResolverRTT
- }
-
- chartID := fmt.Sprintf(chartKey, name)
-
- if !b.charts.Has(chartID) {
- chart = charts[chartKey].Copy()
- chart.ID = chartID
- chart.Fam = fmt.Sprintf(chart.Fam, name)
- _ = b.charts.Add(chart)
- }
-
- chart = b.charts.Get(chartID)
- dimID := fmt.Sprintf("%s_%s", name, key)
-
- if !chart.HasDim(dimID) {
- _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo})
- chart.MarkNotCreated()
- }
-
- metrics[dimID] = val
- }
-
- if len(r.QTypes) > 0 {
- chartID := fmt.Sprintf(keyResolverInQTypes, name)
-
- if !b.charts.Has(chartID) {
- chart = charts[keyResolverInQTypes].Copy()
- chart.ID = chartID
- chart.Fam = fmt.Sprintf(chart.Fam, name)
- _ = b.charts.Add(chart)
- }
-
- chart = b.charts.Get(chartID)
-
- for key, val := range r.QTypes {
- dimID := fmt.Sprintf("%s_%s", name, key)
- if !chart.HasDim(dimID) {
- _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental})
- chart.MarkNotCreated()
- }
- metrics[dimID] = val
- }
- }
-
- if len(r.CacheStats) > 0 {
- chartID := fmt.Sprintf(keyResolverCacheHits, name)
-
- if !b.charts.Has(chartID) {
- chart = charts[keyResolverCacheHits].Copy()
- chart.ID = chartID
- chart.Fam = fmt.Sprintf(chart.Fam, name)
- _ = b.charts.Add(chart)
- for _, dim := range chart.Dims {
- dim.ID = fmt.Sprintf(dim.ID, name)
- }
- }
-
- metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"]
- metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"]
- }
+func (b *Bind) Cleanup() {
+ if b.httpClient != nil {
+ b.httpClient.CloseIdleConnections()
}
}
diff --git a/modules/bind/bind_test.go b/modules/bind/bind_test.go
index 65ff36af0..540eacdcb 100644
--- a/modules/bind/bind_test.go
+++ b/modules/bind/bind_test.go
@@ -8,21 +8,34 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- jsonServerData, _ = os.ReadFile("testdata/query-server.json")
- xmlServerData, _ = os.ReadFile("testdata/query-server.xml")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataServerStatsJSON, _ = os.ReadFile("testdata/query-server.json")
+ dataServerStatsXML, _ = os.ReadFile("testdata/query-server.xml")
)
-func TestNew(t *testing.T) {
- job := New()
- assert.IsType(t, (*Bind)(nil), job)
- assert.NotNil(t, job.charts)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataServerStatsJSON": dataServerStatsJSON,
+ "dataServerStatsXML": dataServerStatsXML,
+ } {
+		require.NotNil(t, data, name)
+	}
+}
+
+func TestBind_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Bind{}, dataConfigJSON, dataConfigYAML)
}
func TestBind_Cleanup(t *testing.T) { New().Cleanup() }
@@ -30,15 +43,13 @@ func TestBind_Cleanup(t *testing.T) { New().Cleanup() }
func TestBind_Init(t *testing.T) {
// OK
job := New()
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.bindAPIClient)
//NG
job = New()
job.URL = ""
- assert.False(t, job.Init())
- job.URL = defaultURL[:len(defaultURL)-1]
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestBind_Check(t *testing.T) {
@@ -46,7 +57,7 @@ func TestBind_Check(t *testing.T) {
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/json/v1/server" {
- _, _ = w.Write(jsonServerData)
+ _, _ = w.Write(dataServerStatsJSON)
}
}))
defer ts.Close()
@@ -54,26 +65,28 @@ func TestBind_Check(t *testing.T) {
job := New()
job.URL = ts.URL + "/json/v1"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
}
func TestBind_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/xml/v3"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
-func TestBind_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+func TestBind_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
func TestBind_CollectJSON(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/json/v1/server" {
- _, _ = w.Write(jsonServerData)
+ _, _ = w.Write(dataServerStatsJSON)
}
}))
defer ts.Close()
@@ -82,8 +95,8 @@ func TestBind_CollectJSON(t *testing.T) {
job.URL = ts.URL + "/json/v1"
job.PermitView = "*"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"_default_Queryv4": 4503685324,
@@ -250,7 +263,7 @@ func TestBind_CollectXML3(t *testing.T) {
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path == "/xml/v3/server" {
- _, _ = w.Write(xmlServerData)
+ _, _ = w.Write(dataServerStatsXML)
}
}))
defer ts.Close()
@@ -259,8 +272,8 @@ func TestBind_CollectXML3(t *testing.T) {
job.PermitView = "*"
job.URL = ts.URL + "/xml/v3"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"_bind_CookieClientOk": 0,
@@ -504,8 +517,8 @@ func TestBind_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL + "/json/v1"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestBind_404(t *testing.T) {
@@ -514,6 +527,6 @@ func TestBind_404(t *testing.T) {
job := New()
job.URL = ts.URL + "/json/v1"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/bind/collect.go b/modules/bind/collect.go
new file mode 100644
index 000000000..cd10634b0
--- /dev/null
+++ b/modules/bind/collect.go
@@ -0,0 +1,200 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+)
+
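+// collect fetches server statistics via the configured API client (JSON v1 or
+// XML v3, chosen at init from the URL suffix) and flattens them into metrics.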
+func (b *Bind) collect() (map[string]int64, error) {
+ mx := make(map[string]int64)
+
+ s, err := b.serverStats()
+ if err != nil {
+ return nil, err
+ }
+ b.collectServerStats(mx, s)
+
+ return mx, nil
+}
+
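+// collectServerStats maps BIND server counters onto charts, creating charts
+// and dimensions lazily the first time a counter, view, or resolver stat is seen.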
+func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) {
+ var chart *Chart
+
+ for k, v := range stats.NSStats {
+ var (
+ algo = module.Incremental
+ dimName = k
+ chartID string
+ )
+ switch {
+ default:
+ continue
+ case k == "RecursClients":
+ dimName = "clients"
+ chartID = keyRecursiveClients
+ algo = module.Absolute
+ case k == "Requestv4":
+ dimName = "IPv4"
+ chartID = keyReceivedRequests
+ case k == "Requestv6":
+ dimName = "IPv6"
+ chartID = keyReceivedRequests
+ case k == "QryFailure":
+ dimName = "failures"
+ chartID = keyQueryFailures
+ case k == "QryUDP":
+ dimName = "UDP"
+ chartID = keyProtocolsQueries
+ case k == "QryTCP":
+ dimName = "TCP"
+ chartID = keyProtocolsQueries
+ case k == "QrySuccess":
+ dimName = "queries"
+ chartID = keyQueriesSuccess
+ case strings.HasSuffix(k, "QryRej"):
+ chartID = keyQueryFailuresDetail
+ case strings.HasPrefix(k, "Qry"):
+ chartID = keyQueriesAnalysis
+ case strings.HasPrefix(k, "Update"):
+ chartID = keyReceivedUpdates
+ }
+
+ if !b.charts.Has(chartID) {
+ _ = b.charts.Add(charts[chartID].Copy())
+ }
+
+ chart = b.charts.Get(chartID)
+
+ if !chart.HasDim(k) {
+ _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo})
+ chart.MarkNotCreated()
+ }
+
+ delete(stats.NSStats, k)
+ metrics[k] = v
+ }
+
+ for _, v := range []struct {
+ item map[string]int64
+ chartID string
+ }{
+ {item: stats.NSStats, chartID: keyNSStats},
+ {item: stats.OpCodes, chartID: keyInOpCodes},
+ {item: stats.QTypes, chartID: keyInQTypes},
+ {item: stats.SockStats, chartID: keyInSockStats},
+ } {
+ if len(v.item) == 0 {
+ continue
+ }
+
+ if !b.charts.Has(v.chartID) {
+ _ = b.charts.Add(charts[v.chartID].Copy())
+ }
+
+ chart = b.charts.Get(v.chartID)
+
+ for key, val := range v.item {
+ if !chart.HasDim(key) {
+ _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+
+ metrics[key] = val
+ }
+ }
+
+ if !(b.permitView != nil && len(stats.Views) > 0) {
+ return
+ }
+
+ for name, view := range stats.Views {
+ if !b.permitView.MatchString(name) {
+ continue
+ }
+ r := view.Resolver
+
+ delete(r.Stats, "BucketSize")
+
+ for key, val := range r.Stats {
+ var (
+ algo = module.Incremental
+ dimName = key
+ chartKey string
+ )
+
+ switch {
+ default:
+ chartKey = keyResolverStats
+ case key == "NumFetch":
+ chartKey = keyResolverNumFetch
+ dimName = "queries"
+ algo = module.Absolute
+ case strings.HasPrefix(key, "QryRTT"):
+ // TODO: not ordered
+ chartKey = keyResolverRTT
+ }
+
+ chartID := fmt.Sprintf(chartKey, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[chartKey].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ }
+
+ chart = b.charts.Get(chartID)
+ dimID := fmt.Sprintf("%s_%s", name, key)
+
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo})
+ chart.MarkNotCreated()
+ }
+
+ metrics[dimID] = val
+ }
+
+ if len(r.QTypes) > 0 {
+ chartID := fmt.Sprintf(keyResolverInQTypes, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[keyResolverInQTypes].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ }
+
+ chart = b.charts.Get(chartID)
+
+ for key, val := range r.QTypes {
+ dimID := fmt.Sprintf("%s_%s", name, key)
+ if !chart.HasDim(dimID) {
+ _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental})
+ chart.MarkNotCreated()
+ }
+ metrics[dimID] = val
+ }
+ }
+
+ if len(r.CacheStats) > 0 {
+ chartID := fmt.Sprintf(keyResolverCacheHits, name)
+
+ if !b.charts.Has(chartID) {
+ chart = charts[keyResolverCacheHits].Copy()
+ chart.ID = chartID
+ chart.Fam = fmt.Sprintf(chart.Fam, name)
+ _ = b.charts.Add(chart)
+ for _, dim := range chart.Dims {
+ dim.ID = fmt.Sprintf(dim.ID, name)
+ }
+ }
+
+ metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"]
+ metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"]
+ }
+ }
+}
diff --git a/modules/bind/config_schema.json b/modules/bind/config_schema.json
index 042f47a1a..8cc84bf6c 100644
--- a/modules/bind/config_schema.json
+++ b/modules/bind/config_schema.json
@@ -1,21 +1,152 @@
{
- "$id": "https://example.com/person.schema.json",
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "title": "Bind collector job configuration",
- "type": "object",
- "properties": {
- "firstName": {
- "type": "string",
- "description": "The person's first name."
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Bind collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Bind statistics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8653/json/v1"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "lastName": {
- "type": "string",
- "description": "The person's last name."
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "age": {
- "description": "Age in years which must be equal to or greater than zero.",
- "type": "integer",
- "minimum": 0
+ "uiOptions": {
+ "fullPage": true
+ },
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "password": {
+ "ui:widget": "password"
+ },
+ "proxy_password": {
+ "ui:widget": "password"
}
}
}
diff --git a/modules/bind/init.go b/modules/bind/init.go
new file mode 100644
index 000000000..daffe29bd
--- /dev/null
+++ b/modules/bind/init.go
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package bind
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+)
+
+func (b *Bind) validateConfig() error {
+ if b.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (b *Bind) initPermitViewMatcher() (matcher.Matcher, error) {
+ if b.PermitView == "" {
+ return nil, nil
+ }
+ return matcher.NewSimplePatternsMatcher(b.PermitView)
+}
+
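+// initBindApiClient selects the JSON or XML statistics client based on the URL suffix.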
+func (b *Bind) initBindApiClient(httpClient *http.Client) (bindAPIClient, error) {
+ switch {
+ case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+
+ return newXML3Client(httpClient, b.Request), nil
+ case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+
+ return newJSONClient(httpClient, b.Request), nil
+ default:
+ return nil, fmt.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL)
+ }
+}
diff --git a/modules/bind/testdata/config.json b/modules/bind/testdata/config.json
new file mode 100644
index 000000000..145df9ff4
--- /dev/null
+++ b/modules/bind/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "permit_view": "ok"
+}
diff --git a/modules/bind/testdata/config.yaml b/modules/bind/testdata/config.yaml
new file mode 100644
index 000000000..cc0a33b74
--- /dev/null
+++ b/modules/bind/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+permit_view: "ok"
diff --git a/modules/cassandra/cassandra.go b/modules/cassandra/cassandra.go
index 1e745fbd8..3cdb9211d 100644
--- a/modules/cassandra/cassandra.go
+++ b/modules/cassandra/cassandra.go
@@ -4,6 +4,7 @@ package cassandra
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -32,7 +33,7 @@ func New() *Cassandra {
URL: "http://127.0.0.1:7072/metrics",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 5),
},
},
},
@@ -43,39 +44,54 @@ func New() *Cassandra {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Cassandra struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
prom prometheus.Prometheus
validateMetrics bool
- mx *cassandraMetrics
+
+ mx *cassandraMetrics
+}
+
+func (c *Cassandra) Configuration() any {
+ return c.Config
}
-func (c *Cassandra) Init() bool {
+func (c *Cassandra) Init() error {
if err := c.validateConfig(); err != nil {
c.Errorf("error on validating config: %v", err)
- return false
+ return err
}
prom, err := c.initPrometheusClient()
if err != nil {
c.Errorf("error on init prometheus client: %v", err)
- return false
+ return err
}
c.prom = prom
- return true
+ return nil
}
-func (c *Cassandra) Check() bool {
- return len(c.Collect()) > 0
+func (c *Cassandra) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (c *Cassandra) Charts() *module.Charts {
@@ -94,4 +110,8 @@ func (c *Cassandra) Collect() map[string]int64 {
return mx
}
-func (c *Cassandra) Cleanup() {}
+func (c *Cassandra) Cleanup() {
+ if c.prom != nil && c.prom.HTTPClient() != nil {
+ c.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/cassandra/cassandra_test.go b/modules/cassandra/cassandra_test.go
index 4425de46e..9d8c4b6a8 100644
--- a/modules/cassandra/cassandra_test.go
+++ b/modules/cassandra/cassandra_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
"github.com/stretchr/testify/assert"
@@ -15,17 +16,26 @@ import (
)
var (
- dataMetrics, _ = os.ReadFile("testdata/metrics.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataExpectedMetrics, _ = os.ReadFile("testdata/metrics.txt")
)
-func Test_TestData(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataMetrics": dataMetrics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataExpectedMetrics": dataExpectedMetrics,
} {
- assert.NotNilf(t, data, name)
+ assert.NotNil(t, data, name)
}
}
+func TestCassandra_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Cassandra{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNew(t *testing.T) {
assert.IsType(t, (*Cassandra)(nil), New())
}
@@ -55,9 +65,9 @@ func TestCassandra_Init(t *testing.T) {
c.Config = test.config
if test.wantFail {
- assert.False(t, c.Init())
+ assert.Error(t, c.Init())
} else {
- assert.True(t, c.Init())
+ assert.NoError(t, c.Init())
}
})
}
@@ -90,12 +100,12 @@ func TestCassandra_Check(t *testing.T) {
c, cleanup := test.prepare()
defer cleanup()
- require.True(t, c.Init())
+ require.NoError(t, c.Init())
if test.wantFail {
- assert.False(t, c.Check())
+ assert.Error(t, c.Check())
} else {
- assert.True(t, c.Check())
+ assert.NoError(t, c.Check())
}
})
}
@@ -239,7 +249,7 @@ func TestCassandra_Collect(t *testing.T) {
c, cleanup := test.prepare()
defer cleanup()
- require.True(t, c.Init())
+ require.NoError(t, c.Init())
mx := c.Collect()
@@ -251,7 +261,7 @@ func TestCassandra_Collect(t *testing.T) {
func prepareCassandra() (c *Cassandra, cleanup func()) {
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(dataMetrics)
+ _, _ = w.Write(dataExpectedMetrics)
}))
c = New()
diff --git a/modules/cassandra/config_schema.json b/modules/cassandra/config_schema.json
index ff22764ec..4895d8b14 100644
--- a/modules/cassandra/config_schema.json
+++ b/modules/cassandra/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/cassandra job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Cassandra collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Cassandra JMX exporter metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:7072/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/cassandra/testdata/config.json b/modules/cassandra/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/cassandra/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/cassandra/testdata/config.yaml b/modules/cassandra/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/cassandra/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
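The config.json/config.yaml fixtures above fill every field of the module's config (including the embedded web.HTTP options) with placeholder values so that JSON and YAML decoding can be compared. A hedged sketch of the round-trip that module.TestConfigurationSerialize, used by the tests further down, presumably performs; the yaml.v2 import is an assumption:

    package cassandra

    import (
        "encoding/json"
        "testing"

        "github.com/stretchr/testify/assert"
        "github.com/stretchr/testify/require"
        "gopkg.in/yaml.v2"
    )

    // Sketch only: decode the same job config from both encodings and require
    // that they produce an identical Config value.
    func testConfigRoundTrip(t *testing.T, cfgJSON, cfgYAML []byte) {
        var fromJSON, fromYAML Config
        require.NoError(t, json.Unmarshal(cfgJSON, &fromJSON))
        require.NoError(t, yaml.Unmarshal(cfgYAML, &fromYAML))
        assert.Equal(t, fromJSON, fromYAML)
    }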
diff --git a/modules/chrony/chrony.go b/modules/chrony/chrony.go
index 9f12325b9..d8eaa31c7 100644
--- a/modules/chrony/chrony.go
+++ b/modules/chrony/chrony.go
@@ -4,6 +4,7 @@ package chrony
import (
_ "embed"
+ "errors"
"time"
"github.com/facebook/time/ntp/chrony"
@@ -25,7 +26,7 @@ func New() *Chrony {
return &Chrony{
Config: Config{
Address: "127.0.0.1:323",
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
charts: charts.Copy(),
newClient: newChronyClient,
@@ -33,19 +34,20 @@ func New() *Chrony {
}
type Config struct {
- Address string `yaml:"address"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type (
Chrony struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- newClient func(c Config) (chronyClient, error)
client chronyClient
+ newClient func(c Config) (chronyClient, error)
}
chronyClient interface {
Tracking() (*chrony.ReplyTracking, error)
@@ -54,17 +56,30 @@ type (
}
)
-func (c *Chrony) Init() bool {
+func (c *Chrony) Configuration() any {
+ return c.Config
+}
+
+func (c *Chrony) Init() error {
if err := c.validateConfig(); err != nil {
c.Errorf("config validation: %v", err)
- return false
+ return err
}
- return true
+ return nil
}
-func (c *Chrony) Check() bool {
- return len(c.Collect()) > 0
+func (c *Chrony) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (c *Chrony) Charts() *module.Charts {
diff --git a/modules/chrony/chrony_test.go b/modules/chrony/chrony_test.go
index a6568b234..20fee0084 100644
--- a/modules/chrony/chrony_test.go
+++ b/modules/chrony/chrony_test.go
@@ -5,14 +5,35 @@ package chrony
import (
"errors"
"net"
+ "os"
"testing"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/facebook/time/ntp/chrony"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestChrony_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Chrony{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestChrony_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -35,9 +56,9 @@ func TestChrony_Init(t *testing.T) {
c.Config = test.config
if test.wantFail {
- assert.False(t, c.Init())
+ assert.Error(t, c.Init())
} else {
- assert.True(t, c.Init())
+ assert.NoError(t, c.Init())
}
})
}
@@ -53,7 +74,7 @@ func TestChrony_Check(t *testing.T) {
prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) },
},
"tracking: success, activity: fail": {
- wantFail: false,
+ wantFail: true,
prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) },
},
"tracking: fail, activity: success": {
@@ -74,12 +95,12 @@ func TestChrony_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
c := test.prepare()
- require.True(t, c.Init())
+ require.NoError(t, c.Init())
if test.wantFail {
- assert.False(t, c.Check())
+ assert.Error(t, c.Check())
} else {
- assert.True(t, c.Check())
+ assert.NoError(t, c.Check())
}
})
}
@@ -100,15 +121,15 @@ func TestChrony_Cleanup(t *testing.T) {
},
"after Init": {
wantClose: false,
- prepare: func(c *Chrony) { c.Init() },
+ prepare: func(c *Chrony) { _ = c.Init() },
},
"after Check": {
wantClose: true,
- prepare: func(c *Chrony) { c.Init(); c.Check() },
+ prepare: func(c *Chrony) { _ = c.Init(); _ = c.Check() },
},
"after Collect": {
wantClose: true,
- prepare: func(c *Chrony) { c.Init(); c.Collect() },
+ prepare: func(c *Chrony) { _ = c.Init(); _ = c.Collect() },
},
}
@@ -197,7 +218,7 @@ func TestChrony_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
c := test.prepare()
- require.True(t, c.Init())
+ require.NoError(t, c.Init())
_ = c.Check()
collected := c.Collect()
@@ -224,7 +245,7 @@ type mockClient struct {
closeCalled bool
}
-func (m mockClient) Tracking() (*chrony.ReplyTracking, error) {
+func (m *mockClient) Tracking() (*chrony.ReplyTracking, error) {
if m.errOnTracking {
return nil, errors.New("mockClient.Tracking call error")
}
@@ -249,7 +270,7 @@ func (m mockClient) Tracking() (*chrony.ReplyTracking, error) {
return &reply, nil
}
-func (m mockClient) Activity() (*chrony.ReplyActivity, error) {
+func (m *mockClient) Activity() (*chrony.ReplyActivity, error) {
if m.errOnActivity {
return nil, errors.New("mockClient.Activity call error")
}
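The receiver change on mockClient above (value to pointer) matters because the mock records state: with a value receiver, a method that sets closeCalled mutates a copy, so the test's later assertion never sees the write. A self-contained illustration, names hypothetical:

    package main

    import "fmt"

    type mock struct{ closed bool }

    func (m mock) closeByValue()    { m.closed = true } // mutates a copy only
    func (m *mock) closeByPointer() { m.closed = true } // mutates the receiver

    func main() {
        m := &mock{}
        m.closeByValue()
        fmt.Println(m.closed) // false: the write was lost
        m.closeByPointer()
        fmt.Println(m.closed) // true
    }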
diff --git a/modules/chrony/client.go b/modules/chrony/client.go
index caa219f3b..e850ff239 100644
--- a/modules/chrony/client.go
+++ b/modules/chrony/client.go
@@ -10,7 +10,7 @@ import (
)
func newChronyClient(c Config) (chronyClient, error) {
- conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration)
+ conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration())
if err != nil {
return nil, err
}
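The Timeout call sites here and in New() imply that pkg/web's Duration changed from a struct wrapping time.Duration to a defined type with an accessor method. A minimal sketch of that shape, offered as an assumption; the real pkg/web type likely also implements custom YAML/JSON unmarshaling:

    package web

    import "time"

    // Duration is a defined type over time.Duration.
    type Duration time.Duration

    // Duration returns the underlying time.Duration, matching the new
    // c.Timeout.Duration() call sites.
    func (d Duration) Duration() time.Duration { return time.Duration(d) }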
diff --git a/modules/chrony/config_schema.json b/modules/chrony/config_schema.json
index 105adaa79..a3e025584 100644
--- a/modules/chrony/config_schema.json
+++ b/modules/chrony/config_schema.json
@@ -1,23 +1,39 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/chrony job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Chrony collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "Chrony address. The format is IP:PORT.",
+ "type": "string",
+ "default": "127.0.0.1:323"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection timeout in seconds.",
+ "type": "number",
+ "default": 1
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
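The rewritten chrony schema describes exactly three properties and requires only "address"; the old schema also required "name" and typed "timeout" as string or integer, while the new one uses a plain number. Mirrored as a Go struct for reference, with types taken directly from the schema:

    // chronyJob mirrors modules/chrony/config_schema.json (sketch, not used
    // by the plugin itself).
    type chronyJob struct {
        UpdateEvery int     `json:"update_every"` // integer, minimum 1, default 1
        Address     string  `json:"address"`      // required, default "127.0.0.1:323"
        Timeout     float64 `json:"timeout"`      // number (seconds), default 1
    }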
diff --git a/modules/chrony/init.go b/modules/chrony/init.go
index 70c8916f2..828112c9d 100644
--- a/modules/chrony/init.go
+++ b/modules/chrony/init.go
@@ -6,7 +6,7 @@ import (
"errors"
)
-func (c Chrony) validateConfig() error {
+func (c *Chrony) validateConfig() error {
if c.Address == "" {
return errors.New("empty 'address'")
}
diff --git a/modules/chrony/testdata/config.json b/modules/chrony/testdata/config.json
new file mode 100644
index 000000000..e86834720
--- /dev/null
+++ b/modules/chrony/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/chrony/testdata/config.yaml b/modules/chrony/testdata/config.yaml
new file mode 100644
index 000000000..1b81d09eb
--- /dev/null
+++ b/modules/chrony/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
diff --git a/modules/cockroachdb/cockroachdb.go b/modules/cockroachdb/cockroachdb.go
index 0a862f97e..039ca8897 100644
--- a/modules/cockroachdb/cockroachdb.go
+++ b/modules/cockroachdb/cockroachdb.go
@@ -7,97 +7,94 @@ import (
"errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
-// DefaultMetricsSampleInterval hard coded to 10
-// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58
-const cockroachDBSamplingInterval = 10
-
//go:embed "config_schema.json"
var configSchema string
+// dbSamplingInterval mirrors CockroachDB's DefaultMetricsSampleInterval, hard-coded to 10:
+// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58
+const dbSamplingInterval = 10
+
func init() {
module.Register("cockroachdb", module.Creator{
JobConfigSchema: configSchema,
Defaults: module.Defaults{
- UpdateEvery: cockroachDBSamplingInterval,
+ UpdateEvery: dbSamplingInterval,
},
Create: func() module.Module { return New() },
})
}
func New() *CockroachDB {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:8080/_status/vars",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &CockroachDB{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8080/_status/vars",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
-
- return &CockroachDB{
- Config: config,
charts: charts.Copy(),
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- UpdateEvery int `yaml:"update_every"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
- CockroachDB struct {
- module.Base
- Config `yaml:",inline"`
+type CockroachDB struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- prom prometheus.Prometheus
- charts *Charts
- }
-)
+ charts *Charts
-func (c *CockroachDB) validateConfig() error {
- if c.URL == "" {
- return errors.New("URL is not set")
- }
- return nil
+ prom prometheus.Prometheus
}
-func (c *CockroachDB) initClient() error {
- client, err := web.NewHTTPClient(c.Client)
- if err != nil {
- return err
- }
-
- c.prom = prometheus.New(client, c.Request)
- return nil
+func (c *CockroachDB) Configuration() any {
+ return c.Config
}
-func (c *CockroachDB) Init() bool {
+func (c *CockroachDB) Init() error {
if err := c.validateConfig(); err != nil {
c.Errorf("error on validating config: %v", err)
- return false
+ return err
}
- if err := c.initClient(); err != nil {
- c.Errorf("error on initializing client: %v", err)
- return false
+
+ prom, err := c.initPrometheusClient()
+ if err != nil {
+ c.Error(err)
+ return err
}
- if c.UpdateEvery < cockroachDBSamplingInterval {
+ c.prom = prom
+
+ if c.UpdateEvery < dbSamplingInterval {
		c.Warningf("'update_every' (%d) is lower than the CockroachDB default sampling interval (%d)",
- c.UpdateEvery, cockroachDBSamplingInterval)
+ c.UpdateEvery, dbSamplingInterval)
}
- return true
+
+ return nil
}
-func (c *CockroachDB) Check() bool {
- return len(c.Collect()) > 0
+func (c *CockroachDB) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (c *CockroachDB) Charts() *Charts {
@@ -116,4 +113,8 @@ func (c *CockroachDB) Collect() map[string]int64 {
return mx
}
-func (CockroachDB) Cleanup() {}
+func (c *CockroachDB) Cleanup() {
+ if c.prom != nil && c.prom.HTTPClient() != nil {
+ c.prom.HTTPClient().CloseIdleConnections()
+ }
+}
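cockroachdb follows the same lifecycle migration as every collector in this patch: Init and Check return error instead of bool, Configuration() exposes the job config, and Cleanup now closes idle HTTP connections. A sketch of the implied contract; the real interface lives in agent/module and may carry more methods:

    package module

    type Charts struct{}

    // Module sketches the collector contract after the bool-to-error migration.
    type Module interface {
        Init() error        // was Init() bool
        Check() error       // was Check() bool
        Charts() *Charts
        Collect() map[string]int64
        Cleanup()
        Configuration() any // new: backs the config serialization tests
    }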
diff --git a/modules/cockroachdb/cockroachdb_test.go b/modules/cockroachdb/cockroachdb_test.go
index 88c307716..1d56e9416 100644
--- a/modules/cockroachdb/cockroachdb_test.go
+++ b/modules/cockroachdb/cockroachdb_test.go
@@ -9,18 +9,32 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- metricsData, _ = os.ReadFile("testdata/metrics.txt")
- wrongMetricsData, _ = os.ReadFile("testdata/non_cockroachdb.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataExpectedMetrics, _ = os.ReadFile("testdata/metrics.txt")
+ dataUnexpectedMetrics, _ = os.ReadFile("testdata/non_cockroachdb.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, metricsData)
- assert.NotNil(t, wrongMetricsData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataExpectedMetrics": dataExpectedMetrics,
+ "dataUnexpectedMetrics": dataUnexpectedMetrics,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestCockroachDB_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CockroachDB{}, dataConfigJSON, dataConfigYAML)
}
func TestNew(t *testing.T) {
@@ -30,36 +44,36 @@ func TestNew(t *testing.T) {
func TestCockroachDB_Init(t *testing.T) {
cdb := prepareCockroachDB()
- assert.True(t, cdb.Init())
+ assert.NoError(t, cdb.Init())
}
func TestCockroachDB_Init_ReturnsFalseIfConfigURLIsNotSet(t *testing.T) {
cdb := prepareCockroachDB()
cdb.URL = ""
- assert.False(t, cdb.Init())
+ assert.Error(t, cdb.Init())
}
func TestCockroachDB_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
cdb := prepareCockroachDB()
cdb.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, cdb.Init())
+ assert.Error(t, cdb.Init())
}
func TestCockroachDB_Check(t *testing.T) {
cdb, srv := prepareClientServer(t)
defer srv.Close()
- assert.True(t, cdb.Check())
+ assert.NoError(t, cdb.Check())
}
func TestCockroachDB_Check_ReturnsFalseIfConnectionRefused(t *testing.T) {
cdb := New()
cdb.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
- assert.False(t, cdb.Check())
+ assert.Error(t, cdb.Check())
}
func TestCockroachDB_Charts(t *testing.T) {
@@ -221,7 +235,7 @@ func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) {
func TestCockroachDB_Collect_ReturnsNilIfConnectionRefused(t *testing.T) {
cdb := prepareCockroachDB()
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
assert.Nil(t, cdb.Collect())
}
@@ -267,12 +281,12 @@ func prepareClientServer(t *testing.T) (*CockroachDB, *httptest.Server) {
t.Helper()
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsData)
+ _, _ = w.Write(dataExpectedMetrics)
}))
cdb := New()
cdb.URL = ts.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, ts
}
@@ -281,12 +295,12 @@ func prepareClientServerNotCockroachDBMetricResponse(t *testing.T) (*CockroachDB
t.Helper()
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(wrongMetricsData)
+ _, _ = w.Write(dataUnexpectedMetrics)
}))
cdb := New()
cdb.URL = ts.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, ts
}
@@ -300,7 +314,7 @@ func prepareClientServerInvalidDataResponse(t *testing.T) (*CockroachDB, *httpte
cdb := New()
cdb.URL = ts.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, ts
}
@@ -314,6 +328,6 @@ func prepareClientServerResponse404(t *testing.T) (*CockroachDB, *httptest.Serve
cdb := New()
cdb.URL = ts.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, ts
}
diff --git a/modules/cockroachdb/config_schema.json b/modules/cockroachdb/config_schema.json
index e732b99f6..f0d9f7041 100644
--- a/modules/cockroachdb/config_schema.json
+++ b/modules/cockroachdb/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/cockroachdb job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CockroachDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CockroachDB Prometheus endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8080/_status/vars"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/cockroachdb/init.go b/modules/cockroachdb/init.go
new file mode 100644
index 000000000..07986a199
--- /dev/null
+++ b/modules/cockroachdb/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package cockroachdb
+
+import (
+	"errors"
+
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (c *CockroachDB) validateConfig() error {
+ if c.URL == "" {
+ return errors.New("URL is not set")
+ }
+ return nil
+}
+
+func (c *CockroachDB) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(c.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, c.Request), nil
+}
diff --git a/modules/cockroachdb/testdata/config.json b/modules/cockroachdb/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/cockroachdb/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/cockroachdb/testdata/config.yaml b/modules/cockroachdb/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/cockroachdb/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
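A note on the "yes" values in these YAML fixtures: YAML 1.1 resolvers, such as gopkg.in/yaml.v2 (assumed to be the decoder in use here), read yes/no as booleans, so they round-trip against the "true" values in config.json. Quick check:

    package main

    import (
        "fmt"

        "gopkg.in/yaml.v2"
    )

    func main() {
        var v struct {
            TLSSkipVerify bool `yaml:"tls_skip_verify"`
        }
        if err := yaml.Unmarshal([]byte("tls_skip_verify: yes"), &v); err != nil {
            panic(err)
        }
        fmt.Println(v.TLSSkipVerify) // true under YAML 1.1 resolution
    }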
diff --git a/modules/consul/config_schema.json b/modules/consul/config_schema.json
index a71723696..d074e7d6b 100644
--- a/modules/consul/config_schema.json
+++ b/modules/consul/config_schema.json
@@ -1,62 +1,159 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/consul job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Consul collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Consul HTTP API.",
+ "type": "string",
+ "default": "http://127.0.0.1:8500"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "acl_token": {
+ "title": "X-Consul-Token",
+ "description": "The token for authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "acl_token",
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "acl_token": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
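The consul schema's Auth tab adds acl_token ("X-Consul-Token") alongside basic auth. A hedged sketch of how such a token is typically attached to Consul HTTP API requests; the header name comes from the schema title, the endpoint path is illustrative:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1:8500/v1/agent/self", nil)
        if err != nil {
            panic(err)
        }
        req.Header.Set("X-Consul-Token", "my-acl-token") // token value is a placeholder
        fmt.Println(req.Header.Get("X-Consul-Token"))
    }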
diff --git a/modules/consul/consul.go b/modules/consul/consul.go
index ebd10984a..b07854b5d 100644
--- a/modules/consul/consul.go
+++ b/modules/consul/consul.go
@@ -4,15 +4,16 @@ package consul
import (
_ "embed"
+ "errors"
"net/http"
"sync"
"time"
- "github.com/blang/semver/v4"
-
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
+
+ "github.com/blang/semver/v4"
)
//go:embed "config_schema.json"
@@ -32,8 +33,12 @@ func New() *Consul {
return &Consul{
Config: Config{
HTTP: web.HTTP{
- Request: web.Request{URL: "http://127.0.0.1:8500"},
- Client: web.Client{Timeout: web.Duration{Duration: time.Second * 2}},
+ Request: web.Request{
+ URL: "http://127.0.0.1:8500",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
charts: &module.Charts{},
@@ -44,15 +49,14 @@ func New() *Consul {
}
type Config struct {
- web.HTTP `yaml:",inline"`
-
- ACLToken string `yaml:"acl_token"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ ACLToken string `yaml:"acl_token" json:"acl_token"`
}
type Consul struct {
module.Base
-
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
addGlobalChartsOnce *sync.Once
@@ -61,39 +65,51 @@ type Consul struct {
httpClient *http.Client
prom prometheus.Prometheus
- cfg *consulConfig
- version *semver.Version
-
+ cfg *consulConfig
+ version *semver.Version
hasLeaderCharts bool
hasFollowerCharts bool
checks map[string]bool
}
-func (c *Consul) Init() bool {
+func (c *Consul) Configuration() any {
+ return c.Config
+}
+
+func (c *Consul) Init() error {
if err := c.validateConfig(); err != nil {
c.Errorf("config validation: %v", err)
- return false
+ return err
}
httpClient, err := c.initHTTPClient()
if err != nil {
c.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
c.httpClient = httpClient
prom, err := c.initPrometheusClient(httpClient)
if err != nil {
c.Errorf("init Prometheus client: %v", err)
- return false
+ return err
}
c.prom = prom
- return true
+ return nil
}
-func (c *Consul) Check() bool {
- return len(c.Collect()) > 0
+func (c *Consul) Check() error {
+ mx, err := c.collect()
+ if err != nil {
+ c.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (c *Consul) Charts() *module.Charts {
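consul.go keeps the blang/semver import because collection is gated on the agent version reported by the self endpoint. A hedged sketch of that style of version gate; the versions are illustrative:

    package main

    import (
        "fmt"

        "github.com/blang/semver/v4"
    )

    func main() {
        v, err := semver.Parse("1.13.2")
        if err != nil {
            panic(err)
        }
        // e.g. only query an endpoint on agent versions that support it
        if v.GE(semver.MustParse("1.13.0")) {
            fmt.Println("use the newer endpoint")
        }
    }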
diff --git a/modules/consul/consul_test.go b/modules/consul/consul_test.go
index b8f990893..594d1291b 100644
--- a/modules/consul/consul_test.go
+++ b/modules/consul/consul_test.go
@@ -8,51 +8,61 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/netdata/go.d.plugin/pkg/web"
)
var (
- datav1132Checks, _ = os.ReadFile("testdata/v1.13.2/v1-agent-checks.json")
- dataV1132ClientSelf, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-self.json")
- dataV1132ClientPromMetrics, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-metrics.txt")
- dataV1132ServerSelf, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self.json")
- dataV1132ServerSelfDisabledPrometheus, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_disabled_prom.json")
- dataV1132ServerSelfWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_with_hostname.json")
- dataV1132ServerPromMetrics, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics.txt")
- dataV1132ServerPromMetricsWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt")
- dataV1132ServerOperatorAutopilotHealth, _ = os.ReadFile("testdata/v1.13.2/server_v1-operator-autopilot-health.json")
- dataV1132ServerCoordinateNodes, _ = os.ReadFile("testdata/v1.13.2/server_v1-coordinate-nodes.json")
-
- dataV1143CloudServerPromMetrics, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-metrics.txt")
- dataV1143CloudServerSelf, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-self.json")
- dataV1143CloudServerCoordinateNodes, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json")
- dataV1143CloudChecks, _ = os.ReadFile("testdata/v1.14.3-cloud/v1-agent-checks.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer1132Checks, _ = os.ReadFile("testdata/v1.13.2/v1-agent-checks.json")
+ dataVer1132ClientSelf, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-self.json")
+ dataVer1132ClientPromMetrics, _ = os.ReadFile("testdata/v1.13.2/client_v1-agent-metrics.txt")
+ dataVer1132ServerSelf, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self.json")
+ dataVer1132ServerSelfDisabledPrometheus, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_disabled_prom.json")
+ dataVer1132ServerSelfWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-self_with_hostname.json")
+ dataVer1132ServerPromMetrics, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics.txt")
+ dataVer1132ServerPromMetricsWithHostname, _ = os.ReadFile("testdata/v1.13.2/server_v1-agent-metrics_with_hostname.txt")
+ dataVer1132ServerOperatorAutopilotHealth, _ = os.ReadFile("testdata/v1.13.2/server_v1-operator-autopilot-health.json")
+ dataVer1132ServerCoordinateNodes, _ = os.ReadFile("testdata/v1.13.2/server_v1-coordinate-nodes.json")
+
+ dataVer1143CloudServerPromMetrics, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-metrics.txt")
+ dataVer1143CloudServerSelf, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-agent-self.json")
+ dataVer1143CloudServerCoordinateNodes, _ = os.ReadFile("testdata/v1.14.3-cloud/server_v1-coordinate-nodes.json")
+ dataVer1143CloudChecks, _ = os.ReadFile("testdata/v1.14.3-cloud/v1-agent-checks.json")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "datav1132Checks": datav1132Checks,
- "dataV1132ClientSelf": dataV1132ClientSelf,
- "dataV1132ClientPromMetrics": dataV1132ClientPromMetrics,
- "dataV1132ServerSelf": dataV1132ServerSelf,
- "dataV1132ServerSelfWithHostname": dataV1132ServerSelfWithHostname,
- "dataV1132ServerSelfDisabledPrometheus": dataV1132ServerSelfDisabledPrometheus,
- "dataV1132ServerPromMetrics": dataV1132ServerPromMetrics,
- "dataV1132ServerPromMetricsWithHostname": dataV1132ServerPromMetricsWithHostname,
- "dataV1132ServerOperatorAutopilotHealth": dataV1132ServerOperatorAutopilotHealth,
- "dataV1132ServerCoordinateNodes": dataV1132ServerCoordinateNodes,
- "dataV1143CloudServerPromMetrics": dataV1143CloudServerPromMetrics,
- "dataV1143CloudServerSelf": dataV1143CloudServerSelf,
- "dataV1143CloudServerCoordinateNodes": dataV1143CloudServerCoordinateNodes,
- "dataV1143CloudChecks": dataV1143CloudChecks,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer1132Checks": dataVer1132Checks,
+ "dataVer1132ClientSelf": dataVer1132ClientSelf,
+ "dataVer1132ClientPromMetrics": dataVer1132ClientPromMetrics,
+ "dataVer1132ServerSelf": dataVer1132ServerSelf,
+ "dataVer1132ServerSelfWithHostname": dataVer1132ServerSelfWithHostname,
+ "dataVer1132ServerSelfDisabledPrometheus": dataVer1132ServerSelfDisabledPrometheus,
+ "dataVer1132ServerPromMetrics": dataVer1132ServerPromMetrics,
+ "dataVer1132ServerPromMetricsWithHostname": dataVer1132ServerPromMetricsWithHostname,
+ "dataVer1132ServerOperatorAutopilotHealth": dataVer1132ServerOperatorAutopilotHealth,
+ "dataVer1132ServerCoordinateNodes": dataVer1132ServerCoordinateNodes,
+ "dataVer1143CloudServerPromMetrics": dataVer1143CloudServerPromMetrics,
+ "dataVer1143CloudServerSelf": dataVer1143CloudServerSelf,
+ "dataVer1143CloudServerCoordinateNodes": dataVer1143CloudServerCoordinateNodes,
+ "dataVer1143CloudChecks": dataVer1143CloudChecks,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestConsul_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Consul{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestConsul_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -78,9 +88,9 @@ func TestConsul_Init(t *testing.T) {
consul.Config = test.config
if test.wantFail {
- assert.False(t, consul.Init())
+ assert.Error(t, consul.Init())
} else {
- assert.True(t, consul.Init())
+ assert.NoError(t, consul.Init())
}
})
}
@@ -131,9 +141,9 @@ func TestConsul_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, consul.Check())
+ assert.Error(t, consul.Check())
} else {
- assert.True(t, consul.Check())
+ assert.NoError(t, consul.Check())
}
})
}
@@ -544,15 +554,15 @@ func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case r.URL.Path == urlPathAgentSelf:
- _, _ = w.Write(dataV1143CloudServerSelf)
+ _, _ = w.Write(dataVer1143CloudServerSelf)
case r.URL.Path == urlPathAgentChecks:
- _, _ = w.Write(dataV1143CloudChecks)
+ _, _ = w.Write(dataVer1143CloudChecks)
case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
- _, _ = w.Write(dataV1143CloudServerPromMetrics)
+ _, _ = w.Write(dataVer1143CloudServerPromMetrics)
case r.URL.Path == urlPathOperationAutopilotHealth:
w.WriteHeader(http.StatusForbidden)
case r.URL.Path == urlPathCoordinateNodes:
- _, _ = w.Write(dataV1143CloudServerCoordinateNodes)
+ _, _ = w.Write(dataVer1143CloudServerCoordinateNodes)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -561,7 +571,7 @@ func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -572,15 +582,15 @@ func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case r.URL.Path == urlPathAgentSelf:
- _, _ = w.Write(dataV1132ServerSelf)
+ _, _ = w.Write(dataVer1132ServerSelf)
case r.URL.Path == urlPathAgentChecks:
- _, _ = w.Write(datav1132Checks)
+ _, _ = w.Write(dataVer1132Checks)
case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
- _, _ = w.Write(dataV1132ServerPromMetrics)
+ _, _ = w.Write(dataVer1132ServerPromMetrics)
case r.URL.Path == urlPathOperationAutopilotHealth:
- _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth)
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
case r.URL.Path == urlPathCoordinateNodes:
- _, _ = w.Write(dataV1132ServerCoordinateNodes)
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -589,7 +599,7 @@ func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -600,15 +610,15 @@ func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case r.URL.Path == urlPathAgentSelf:
- _, _ = w.Write(dataV1132ServerSelfWithHostname)
+ _, _ = w.Write(dataVer1132ServerSelfWithHostname)
case r.URL.Path == urlPathAgentChecks:
- _, _ = w.Write(datav1132Checks)
+ _, _ = w.Write(dataVer1132Checks)
case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
- _, _ = w.Write(dataV1132ServerPromMetricsWithHostname)
+ _, _ = w.Write(dataVer1132ServerPromMetricsWithHostname)
case r.URL.Path == urlPathOperationAutopilotHealth:
- _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth)
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
case r.URL.Path == urlPathCoordinateNodes:
- _, _ = w.Write(dataV1132ServerCoordinateNodes)
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -617,7 +627,7 @@ func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -628,13 +638,13 @@ func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func())
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathAgentSelf:
- _, _ = w.Write(dataV1132ServerSelfDisabledPrometheus)
+ _, _ = w.Write(dataVer1132ServerSelfDisabledPrometheus)
case urlPathAgentChecks:
- _, _ = w.Write(datav1132Checks)
+ _, _ = w.Write(dataVer1132Checks)
case urlPathOperationAutopilotHealth:
- _, _ = w.Write(dataV1132ServerOperatorAutopilotHealth)
+ _, _ = w.Write(dataVer1132ServerOperatorAutopilotHealth)
case urlPathCoordinateNodes:
- _, _ = w.Write(dataV1132ServerCoordinateNodes)
+ _, _ = w.Write(dataVer1132ServerCoordinateNodes)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -643,7 +653,7 @@ func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func())
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -654,11 +664,11 @@ func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case r.URL.Path == urlPathAgentSelf:
- _, _ = w.Write(dataV1132ClientSelf)
+ _, _ = w.Write(dataVer1132ClientSelf)
case r.URL.Path == urlPathAgentChecks:
- _, _ = w.Write(datav1132Checks)
+ _, _ = w.Write(dataVer1132Checks)
case r.URL.Path == urlPathAgentMetrics && r.URL.RawQuery == "format=prometheus":
- _, _ = w.Write(dataV1132ClientPromMetrics)
+ _, _ = w.Write(dataVer1132ClientPromMetrics)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -667,7 +677,7 @@ func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -682,7 +692,7 @@ func caseInvalidDataResponse(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
@@ -691,7 +701,7 @@ func caseConnectionRefused(t *testing.T) (*Consul, func()) {
t.Helper()
consul := New()
consul.URL = "http://127.0.0.1:65535/"
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, func() {}
}
@@ -705,7 +715,7 @@ func case404(t *testing.T) (*Consul, func()) {
consul := New()
consul.URL = srv.URL
- require.True(t, consul.Init())
+ require.NoError(t, consul.Init())
return consul, srv.Close
}
diff --git a/modules/consul/testdata/config.json b/modules/consul/testdata/config.json
new file mode 100644
index 000000000..bcd07a41b
--- /dev/null
+++ b/modules/consul/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "acl_token": "ok"
+}
diff --git a/modules/consul/testdata/config.yaml b/modules/consul/testdata/config.yaml
new file mode 100644
index 000000000..def554c7e
--- /dev/null
+++ b/modules/consul/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+acl_token: "ok"
diff --git a/modules/coredns/config_schema.json b/modules/coredns/config_schema.json
index 70b9ef001..0ffb11c52 100644
--- a/modules/coredns/config_schema.json
+++ b/modules/coredns/config_schema.json
@@ -1,93 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/coredns job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "per_server_stats": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CoreDNS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CoreDNS metrics page to monitor.",
+ "type": "string",
+ "default": "http://127.0.0.1:9153/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
}
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
}
},
- "per_zone_stats": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
},
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
}
- }
- },
- "username": {
- "type": "string"
+ ]
},
- "password": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "proxy_url": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "proxy_username": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
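per_server_stats and per_zone_stats, kept in the Go Config below, take a matcher expression: include/exclude lists of patterns such as "glob:*", as the coredns testdata further down shows. The shape, sketched from those fixtures (the real type is matcher.SimpleExpr in pkg/matcher):

    // SimpleExpr sketch: include/exclude pattern lists, e.g. "glob:*".
    type SimpleExpr struct {
        Includes []string `yaml:"includes" json:"includes"`
        Excludes []string `yaml:"excludes" json:"excludes"`
    }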
diff --git a/modules/coredns/coredns.go b/modules/coredns/coredns.go
index 18c92caf3..c3bb6b808 100644
--- a/modules/coredns/coredns.go
+++ b/modules/coredns/coredns.go
@@ -4,19 +4,15 @@ package coredns
import (
_ "embed"
+ "errors"
"time"
- "github.com/blang/semver/v4"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-const (
- defaultURL = "http://127.0.0.1:9153/metrics"
- defaultHTTPTimeout = time.Second * 2
+ "github.com/blang/semver/v4"
)
//go:embed "config_schema.json"
@@ -29,39 +25,39 @@ func init() {
})
}
-// New creates CoreDNS with default values.
func New() *CoreDNS {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &CoreDNS{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9153/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
- return &CoreDNS{
- Config: config,
charts: summaryCharts.Copy(),
collectedServers: make(map[string]bool),
collectedZones: make(map[string]bool),
}
}
-// Config is the CoreDNS module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
- PerServerStats matcher.SimpleExpr `yaml:"per_server_stats"`
- PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ PerServerStats matcher.SimpleExpr `yaml:"per_server_stats" json:"per_server_stats"`
+ PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats" json:"per_zone_stats"`
}
-// CoreDNS CoreDNS module.
type CoreDNS struct {
module.Base
- Config `yaml:",inline"`
- charts *Charts
- prom prometheus.Prometheus
+ Config `yaml:",inline" json:""`
+
+ prom prometheus.Prometheus
+
+ charts *Charts
+
perServerMatcher matcher.Matcher
perZoneMatcher matcher.Matcher
collectedServers map[string]bool
@@ -71,56 +67,61 @@ type CoreDNS struct {
metricNames requestMetricsNames
}
-// Cleanup makes cleanup.
-func (CoreDNS) Cleanup() {}
+func (cd *CoreDNS) Configuration() any {
+ return cd.Config
+}
-// Init makes initialization.
-func (cd *CoreDNS) Init() bool {
- if cd.URL == "" {
- cd.Error("URL not set")
- return false
+func (cd *CoreDNS) Init() error {
+ if err := cd.validateConfig(); err != nil {
+ cd.Errorf("config validation: %v", err)
+ return err
}
- if !cd.PerServerStats.Empty() {
- m, err := cd.PerServerStats.Parse()
- if err != nil {
- cd.Errorf("error on creating 'per_server_stats' matcher : %v", err)
- return false
- }
- cd.perServerMatcher = matcher.WithCache(m)
+ sm, err := cd.initPerServerMatcher()
+ if err != nil {
+ cd.Error(err)
+ return err
}
-
- if !cd.PerZoneStats.Empty() {
- m, err := cd.PerZoneStats.Parse()
- if err != nil {
- cd.Errorf("error on creating 'per_zone_stats' matcher : %v", err)
- return false
- }
- cd.perZoneMatcher = matcher.WithCache(m)
+ if sm != nil {
+ cd.perServerMatcher = sm
}
- client, err := web.NewHTTPClient(cd.Client)
+ zm, err := cd.initPerZoneMatcher()
if err != nil {
- cd.Errorf("error on creating http client : %v", err)
- return false
+ cd.Error(err)
+ return err
+ }
+ if zm != nil {
+ cd.perZoneMatcher = zm
}
- cd.prom = prometheus.New(client, cd.Request)
+ prom, err := cd.initPrometheusClient()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ cd.prom = prom
- return true
+ return nil
}
-// Check makes check.
-func (cd *CoreDNS) Check() bool {
- return len(cd.Collect()) > 0
+func (cd *CoreDNS) Check() error {
+ mx, err := cd.collect()
+ if err != nil {
+ cd.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts creates Charts.
func (cd *CoreDNS) Charts() *Charts {
return cd.charts
}
-// Collect collects metrics.
func (cd *CoreDNS) Collect() map[string]int64 {
mx, err := cd.collect()
@@ -131,3 +132,9 @@ func (cd *CoreDNS) Collect() map[string]int64 {
return mx
}
+
+func (cd *CoreDNS) Cleanup() {
+ if cd.prom != nil && cd.prom.HTTPClient() != nil {
+ cd.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/coredns/coredns_test.go b/modules/coredns/coredns_test.go
index a6b77976a..8521f1992 100644
--- a/modules/coredns/coredns_test.go
+++ b/modules/coredns/coredns_test.go
@@ -8,36 +8,59 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testNoLoad169, _ = os.ReadFile("testdata/version169/no_load.txt")
- testSomeLoad169, _ = os.ReadFile("testdata/version169/some_load.txt")
- testNoLoad170, _ = os.ReadFile("testdata/version170/no_load.txt")
- testSomeLoad170, _ = os.ReadFile("testdata/version170/some_load.txt")
- testNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer169NoLoad, _ = os.ReadFile("testdata/version169/no_load.txt")
+ dataVer169SomeLoad, _ = os.ReadFile("testdata/version169/some_load.txt")
+
+ dataVer170NoLoad, _ = os.ReadFile("testdata/version170/no_load.txt")
+ dataVer170SomeLoad, _ = os.ReadFile("testdata/version170/some_load.txt")
+
+ dataNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt")
)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer169NoLoad": dataVer169NoLoad,
+ "dataVer169SomeLoad": dataVer169SomeLoad,
+ "dataVer170NoLoad": dataVer170NoLoad,
+ "dataVer170SomeLoad": dataVer170SomeLoad,
+ "dataNoLoadNoVersion": dataNoLoadNoVersion,
+ } {
+ require.NotNilf(t, data, name)
+ }
+}
- assert.IsType(t, (*CoreDNS)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func TestCoreDNS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CoreDNS{}, dataConfigJSON, dataConfigYAML)
}
-func TestCoreDNS_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+func TestCoreDNS_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
-func TestCoreDNS_Cleanup(t *testing.T) { New().Cleanup() }
+func TestCoreDNS_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
-func TestCoreDNS_Init(t *testing.T) { assert.True(t, New().Init()) }
+func TestCoreDNS_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
func TestCoreDNS_InitNG(t *testing.T) {
job := New()
job.URL = ""
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestCoreDNS_Check(t *testing.T) {
@@ -45,8 +68,8 @@ func TestCoreDNS_Check(t *testing.T) {
name string
data []byte
}{
- {"version 1.6.9", testNoLoad169},
- {"version 1.7.0", testNoLoad170},
+ {"version 1.6.9", dataVer169NoLoad},
+ {"version 1.7.0", dataVer170NoLoad},
}
for _, testNoLoad := range tests {
t.Run(testNoLoad.name, func(t *testing.T) {
@@ -60,8 +83,8 @@ func TestCoreDNS_Check(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
})
}
}
@@ -69,8 +92,8 @@ func TestCoreDNS_Check(t *testing.T) {
func TestCoreDNS_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestCoreDNS_Collect(t *testing.T) {
@@ -78,8 +101,8 @@ func TestCoreDNS_Collect(t *testing.T) {
name string
data []byte
}{
- {"version 1.6.9", testSomeLoad169},
- {"version 1.7.0", testSomeLoad170},
+ {"version 1.6.9", dataVer169SomeLoad},
+ {"version 1.7.0", dataVer170SomeLoad},
}
for _, testSomeLoad := range tests {
t.Run(testSomeLoad.name, func(t *testing.T) {
@@ -95,8 +118,8 @@ func TestCoreDNS_Collect(t *testing.T) {
job.URL = ts.URL + "/metrics"
job.PerServerStats.Includes = []string{"glob:*"}
job.PerZoneStats.Includes = []string{"glob:*"}
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"coredns.io._request_per_ip_family_v4": 19,
@@ -428,8 +451,8 @@ func TestCoreDNS_CollectNoLoad(t *testing.T) {
name string
data []byte
}{
- {"version 1.6.9", testNoLoad169},
- {"version 1.7.0", testNoLoad170},
+ {"version 1.6.9", dataVer169NoLoad},
+ {"version 1.7.0", dataVer170NoLoad},
}
for _, testNoLoad := range tests {
t.Run(testNoLoad.name, func(t *testing.T) {
@@ -444,8 +467,8 @@ func TestCoreDNS_CollectNoLoad(t *testing.T) {
job.URL = ts.URL + "/metrics"
job.PerServerStats.Includes = []string{"glob:*"}
job.PerZoneStats.Includes = []string{"glob:*"}
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"no_matching_zone_dropped_total": 0,
@@ -513,8 +536,8 @@ func TestCoreDNS_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestCoreDNS_404(t *testing.T) {
@@ -527,15 +550,15 @@ func TestCoreDNS_404(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestCoreDNS_CollectNoVersion(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testNoLoadNoVersion)
+ _, _ = w.Write(dataNoLoadNoVersion)
}))
defer ts.Close()
@@ -543,8 +566,8 @@ func TestCoreDNS_CollectNoVersion(t *testing.T) {
job.URL = ts.URL + "/metrics"
job.PerServerStats.Includes = []string{"glob:*"}
job.PerZoneStats.Includes = []string{"glob:*"}
- require.True(t, job.Init())
- require.False(t, job.Check())
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
assert.Nil(t, job.Collect())
}
diff --git a/modules/coredns/init.go b/modules/coredns/init.go
new file mode 100644
index 000000000..79d05926d
--- /dev/null
+++ b/modules/coredns/init.go
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package coredns
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (cd *CoreDNS) validateConfig() error {
+ if cd.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (cd *CoreDNS) initPerServerMatcher() (matcher.Matcher, error) {
+ if cd.PerServerStats.Empty() {
+ return nil, nil
+ }
+ return cd.PerServerStats.Parse()
+}
+
+func (cd *CoreDNS) initPerZoneMatcher() (matcher.Matcher, error) {
+ if cd.PerZoneStats.Empty() {
+ return nil, nil
+ }
+ return cd.PerZoneStats.Parse()
+}
+
+func (cd *CoreDNS) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(cd.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, cd.Request), nil
+}
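
The two matcher initializers above deliberately return a nil matcher when the selector is empty, so callers can distinguish "feature disabled" from "match nothing". A minimal sketch of how such a selector behaves, assuming the `PerServerStats`/`PerZoneStats` fields are `matcher.SimpleExpr` values (their declaration sits outside this hunk):

```go
package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/matcher"
)

func main() {
	// Selector equivalent to the tests' `Includes = []string{"glob:*"}`.
	// Assumes the config field is a matcher.SimpleExpr; not shown in this hunk.
	expr := matcher.SimpleExpr{Includes: []string{"glob:*"}}

	if expr.Empty() { // mirrors the Empty() guard in initPerServerMatcher
		fmt.Println("selector empty: per-server stats stay disabled")
		return
	}

	m, err := expr.Parse() // same call as cd.PerServerStats.Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("dns://:53")) // glob:* matches any server name
}
```
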
diff --git a/modules/coredns/testdata/config.json b/modules/coredns/testdata/config.json
new file mode 100644
index 000000000..2dc54a1a2
--- /dev/null
+++ b/modules/coredns/testdata/config.json
@@ -0,0 +1,36 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "per_server_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ },
+ "per_zone_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/coredns/testdata/config.yaml b/modules/coredns/testdata/config.yaml
new file mode 100644
index 000000000..be474167f
--- /dev/null
+++ b/modules/coredns/testdata/config.yaml
@@ -0,0 +1,27 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+per_server_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
+per_zone_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/modules/couchbase/config_schema.json b/modules/couchbase/config_schema.json
index 307a1261b..5f5b62f54 100644
--- a/modules/couchbase/config_schema.json
+++ b/modules/couchbase/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/couchbase job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Couchbase collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Couchbase server management REST API.",
+ "type": "string",
+ "default": "http://127.0.0.1:8091"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
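
The schema file is no longer a bare JSON Schema: it is now an envelope with the draft-07 validation schema under `jsonSchema` and presentation hints (tabs, widgets, `ui:help`) under `uiSchema`. A hedged sketch of how a consumer could split the envelope; the struct here is illustrative, not the plugin's actual type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// configSchema mirrors the new envelope layout. The field names match the
// top-level keys of config_schema.json; the type itself is hypothetical.
type configSchema struct {
	JSONSchema json.RawMessage `json:"jsonSchema"` // draft-07 validation schema
	UISchema   json.RawMessage `json:"uiSchema"`   // rendering hints (tabs, widgets)
}

func main() {
	raw, err := os.ReadFile("modules/couchbase/config_schema.json")
	if err != nil {
		panic(err)
	}
	var cs configSchema
	if err := json.Unmarshal(raw, &cs); err != nil {
		panic(err)
	}
	fmt.Printf("jsonSchema: %d bytes, uiSchema: %d bytes\n",
		len(cs.JSONSchema), len(cs.UISchema))
}
```
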
diff --git a/modules/couchbase/couchbase.go b/modules/couchbase/couchbase.go
index b92ec2d76..85960a4fc 100644
--- a/modules/couchbase/couchbase.go
+++ b/modules/couchbase/couchbase.go
@@ -4,6 +4,7 @@ package couchbase
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -32,7 +33,7 @@ func New() *Couchbase {
URL: "http://127.0.0.1:8091",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -40,53 +41,60 @@ func New() *Couchbase {
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- }
- Couchbase struct {
- module.Base
- Config `yaml:",inline"`
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
- httpClient *http.Client
- charts *module.Charts
- collectedBuckets map[string]bool
- }
-)
+type Couchbase struct {
+ module.Base
+ Config `yaml:",inline" json:""`
-func (cb *Couchbase) Cleanup() {
- if cb.httpClient == nil {
- return
- }
- cb.httpClient.CloseIdleConnections()
+ httpClient *http.Client
+ charts *module.Charts
+
+ collectedBuckets map[string]bool
}
-func (cb *Couchbase) Init() bool {
+func (cb *Couchbase) Configuration() any {
+ return cb.Config
+}
+
+func (cb *Couchbase) Init() error {
err := cb.validateConfig()
if err != nil {
cb.Errorf("check configuration: %v", err)
- return false
+ return err
}
httpClient, err := cb.initHTTPClient()
if err != nil {
cb.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
cb.httpClient = httpClient
charts, err := cb.initCharts()
if err != nil {
cb.Errorf("init charts: %v", err)
- return false
+ return err
}
-
cb.charts = charts
- return true
+
+ return nil
}
-func (cb *Couchbase) Check() bool {
- return len(cb.Collect()) > 0
+func (cb *Couchbase) Check() error {
+ mx, err := cb.collect()
+ if err != nil {
+ cb.Error(err)
+ return err
+ }
+	if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (cb *Couchbase) Charts() *Charts {
@@ -104,3 +112,10 @@ func (cb *Couchbase) Collect() map[string]int64 {
}
return mx
}
+
+func (cb *Couchbase) Cleanup() {
+ if cb.httpClient == nil {
+ return
+ }
+ cb.httpClient.CloseIdleConnections()
+}
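
The same mechanical migration repeats across every module in this patch: `Init`/`Check` return `error` instead of `bool`, `Configuration()` exposes the embedded `Config`, and `Cleanup` moves to the bottom of the file. A minimal skeleton of the resulting contract, assuming `module.Module` now requires these signatures (the interface change itself is not part of this hunk):

```go
package sketch

import (
	"errors"

	"github.com/netdata/go.d.plugin/agent/module"
)

// Config carries only what this sketch needs; real modules embed web.HTTP etc.
type Config struct {
	UpdateEvery int `yaml:"update_every" json:"update_every"`
}

type Sketch struct {
	module.Base
	Config `yaml:",inline" json:""`
}

func (s *Sketch) Configuration() any { return s.Config }

func (s *Sketch) Init() error { return nil } // validate config, build clients

func (s *Sketch) Check() error {
	mx := s.Collect()
	if len(mx) == 0 {
		return errors.New("no metrics collected") // same sentinel as the patch
	}
	return nil
}

func (s *Sketch) Charts() *module.Charts    { return &module.Charts{} }
func (s *Sketch) Collect() map[string]int64 { return map[string]int64{"up": 1} }
func (s *Sketch) Cleanup()                  {}
```
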
diff --git a/modules/couchbase/couchbase_test.go b/modules/couchbase/couchbase_test.go
index da0fa4e66..f89fb1cab 100644
--- a/modules/couchbase/couchbase_test.go
+++ b/modules/couchbase/couchbase_test.go
@@ -16,21 +16,26 @@ import (
)
var (
- v660BucketsBasicStats, _ = os.ReadFile("testdata/6.6.0/buckets_basic_stats.json")
-)
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
-}
+ dataVer660BucketsBasicStats, _ = os.ReadFile("testdata/6.6.0/buckets_basic_stats.json")
+)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v660BucketsBasicStats": v660BucketsBasicStats,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer660BucketsBasicStats": dataVer660BucketsBasicStats,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestCouchbase_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Couchbase{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestCouchbase_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -67,9 +72,9 @@ func TestCouchbase_Init(t *testing.T) {
cb.Config = test.config
if test.wantFail {
- assert.False(t, cb.Init())
+ assert.Error(t, cb.Init())
} else {
- assert.True(t, cb.Init())
+ assert.NoError(t, cb.Init())
}
})
}
@@ -103,9 +108,9 @@ func TestCouchbase_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, cb.Check())
+ assert.Error(t, cb.Check())
} else {
- assert.True(t, cb.Check())
+ assert.NoError(t, cb.Check())
}
})
}
@@ -173,12 +178,12 @@ func prepareCouchbaseV660(t *testing.T) (cb *Couchbase, cleanup func()) {
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(v660BucketsBasicStats)
+ _, _ = w.Write(dataVer660BucketsBasicStats)
}))
cb = New()
cb.URL = srv.URL
- require.True(t, cb.Init())
+ require.NoError(t, cb.Init())
return cb, srv.Close
}
@@ -191,7 +196,7 @@ func prepareCouchbaseInvalidData(t *testing.T) (*Couchbase, func()) {
}))
cb := New()
cb.URL = srv.URL
- require.True(t, cb.Init())
+ require.NoError(t, cb.Init())
return cb, srv.Close
}
@@ -204,7 +209,7 @@ func prepareCouchbase404(t *testing.T) (*Couchbase, func()) {
}))
cb := New()
cb.URL = srv.URL
- require.True(t, cb.Init())
+ require.NoError(t, cb.Init())
return cb, srv.Close
}
@@ -213,7 +218,7 @@ func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) {
t.Helper()
cb := New()
cb.URL = "http://127.0.0.1:38001"
- require.True(t, cb.Init())
+ require.NoError(t, cb.Init())
return cb, func() {}
}
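
`TestConfigurationSerialize` is why every module gains json tags and the `testdata/config.{json,yaml}` fixtures. Its implementation is not part of this diff; a hedged guess at the round trip it verifies, assuming `module.Module` gained a `Configuration() any` method in this release:

```go
package sketch

import (
	"encoding/json"
	"testing"

	"github.com/netdata/go.d.plugin/agent/module"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// testConfigurationSerialize is a guess at what the helper checks: the fixture
// must unmarshal into the module and come back out of Configuration() without
// losing fields, proving the struct tags cover every config option.
func testConfigurationSerialize(t *testing.T, mod module.Module, cfgJSON, cfgYAML []byte) {
	t.Helper()

	require.NoError(t, json.Unmarshal(cfgJSON, mod))
	bs, err := json.Marshal(mod.Configuration())
	require.NoError(t, err)
	require.JSONEq(t, string(cfgJSON), string(bs))

	// Same round trip through the yaml tags.
	require.NoError(t, yaml.Unmarshal(cfgYAML, mod))
}
```
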
diff --git a/modules/couchbase/init.go b/modules/couchbase/init.go
index c274ee572..abb330717 100644
--- a/modules/couchbase/init.go
+++ b/modules/couchbase/init.go
@@ -24,11 +24,11 @@ func (cb *Couchbase) initCharts() (*Charts, error) {
return bucketCharts.Copy(), nil
}
-func (cb Couchbase) initHTTPClient() (*http.Client, error) {
+func (cb *Couchbase) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(cb.Client)
}
-func (cb Couchbase) validateConfig() error {
+func (cb *Couchbase) validateConfig() error {
if cb.URL == "" {
return errors.New("URL not set")
}
diff --git a/modules/couchbase/testdata/config.json b/modules/couchbase/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/couchbase/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/couchbase/testdata/config.yaml b/modules/couchbase/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/couchbase/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/couchdb/collect.go b/modules/couchdb/collect.go
index 9fd041800..27dd33549 100644
--- a/modules/couchdb/collect.go
+++ b/modules/couchdb/collect.go
@@ -42,7 +42,7 @@ func (cdb *CouchDB) collect() (map[string]int64, error) {
return collected, nil
}
-func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) {
+func (cdb *CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) {
if !ms.hasNodeStats() {
return
}
@@ -56,7 +56,7 @@ func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) {
}
}
-func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) {
+func (cdb *CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) {
if !ms.hasNodeSystem() {
return
}
@@ -68,7 +68,7 @@ func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) {
collected["peak_msg_queue"] = findMaxMQSize(ms.NodeSystem.MessageQueues)
}
-func (CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) {
+func (cdb *CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) {
collected["active_tasks_indexer"] = 0
collected["active_tasks_database_compaction"] = 0
collected["active_tasks_replication"] = 0
diff --git a/modules/couchdb/config_schema.json b/modules/couchdb/config_schema.json
index e3a67e322..6df79507a 100644
--- a/modules/couchdb/config_schema.json
+++ b/modules/couchdb/config_schema.json
@@ -1,65 +1,166 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/couchdb job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "CouchDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the CouchDB web server.",
+ "type": "string",
+ "default": "http://127.0.0.1:5984"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "node": {
+ "title": "Node name",
+ "description": "CouchDB node name. Same as -name vm.args argument.",
+ "type": "string",
+ "default": "_local"
+ },
+ "databases": {
+ "title": "Databases",
+ "description": "A space-separated list of database names for which database-specific statistics should be displayed.",
+ "type": "string"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url",
+ "node"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "node",
+ "databases"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "node": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "databases": {
- "type": "string"
- },
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/couchdb/couchdb.go b/modules/couchdb/couchdb.go
index 3342b7b7f..f56503164 100644
--- a/modules/couchdb/couchdb.go
+++ b/modules/couchdb/couchdb.go
@@ -4,6 +4,7 @@ package couchdb
import (
_ "embed"
+ "errors"
"net/http"
"strings"
"time"
@@ -33,7 +34,7 @@ func New() *CouchDB {
URL: "http://127.0.0.1:5984",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 2),
},
},
Node: "_local",
@@ -41,36 +42,33 @@ func New() *CouchDB {
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- Node string `yaml:"node"`
- Databases string `yaml:"databases"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Node string `yaml:"node" json:"node"`
+ Databases string `yaml:"databases" json:"databases"`
+}
- CouchDB struct {
- module.Base
- Config `yaml:",inline"`
+type CouchDB struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- httpClient *http.Client
- charts *module.Charts
+ charts *module.Charts
- databases []string
- }
-)
+ httpClient *http.Client
-func (cdb *CouchDB) Cleanup() {
- if cdb.httpClient == nil {
- return
- }
- cdb.httpClient.CloseIdleConnections()
+ databases []string
+}
+
+func (cdb *CouchDB) Configuration() any {
+ return cdb.Config
}
-func (cdb *CouchDB) Init() bool {
+func (cdb *CouchDB) Init() error {
err := cdb.validateConfig()
if err != nil {
cdb.Errorf("check configuration: %v", err)
- return false
+ return err
}
cdb.databases = strings.Fields(cdb.Config.Databases)
@@ -78,26 +76,37 @@ func (cdb *CouchDB) Init() bool {
httpClient, err := cdb.initHTTPClient()
if err != nil {
cdb.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
cdb.httpClient = httpClient
charts, err := cdb.initCharts()
if err != nil {
cdb.Errorf("init charts: %v", err)
- return false
+ return err
}
cdb.charts = charts
- return true
+ return nil
}
-func (cdb *CouchDB) Check() bool {
+func (cdb *CouchDB) Check() error {
if err := cdb.pingCouchDB(); err != nil {
cdb.Error(err)
- return false
+ return err
}
- return len(cdb.Collect()) > 0
+
+ mx, err := cdb.collect()
+ if err != nil {
+ cdb.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+
+ return nil
}
func (cdb *CouchDB) Charts() *Charts {
@@ -115,3 +124,10 @@ func (cdb *CouchDB) Collect() map[string]int64 {
}
return mx
}
+
+func (cdb *CouchDB) Cleanup() {
+ if cdb.httpClient == nil {
+ return
+ }
+ cdb.httpClient.CloseIdleConnections()
+}
diff --git a/modules/couchdb/couchdb_test.go b/modules/couchdb/couchdb_test.go
index 29b5b64af..fe3f72ec4 100644
--- a/modules/couchdb/couchdb_test.go
+++ b/modules/couchdb/couchdb_test.go
@@ -17,27 +17,32 @@ import (
)
var (
- v311Root, _ = os.ReadFile("testdata/v3.1.1/root.json")
- v311ActiveTasks, _ = os.ReadFile("testdata/v3.1.1/active_tasks.json")
- v311NodeStats, _ = os.ReadFile("testdata/v3.1.1/node_stats.json")
- v311NodeSystem, _ = os.ReadFile("testdata/v3.1.1/node_system.json")
- v311DbsInfo, _ = os.ReadFile("testdata/v3.1.1/dbs_info.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer311Root, _ = os.ReadFile("testdata/v3.1.1/root.json")
+ dataVer311ActiveTasks, _ = os.ReadFile("testdata/v3.1.1/active_tasks.json")
+ dataVer311NodeStats, _ = os.ReadFile("testdata/v3.1.1/node_stats.json")
+ dataVer311NodeSystem, _ = os.ReadFile("testdata/v3.1.1/node_system.json")
+ dataVer311DbsInfo, _ = os.ReadFile("testdata/v3.1.1/dbs_info.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v311Root": v311Root,
- "v311ActiveTasks": v311ActiveTasks,
- "v311NodeStats": v311NodeStats,
- "v311NodeSystem": v311NodeSystem,
- "v311DbsInfo": v311DbsInfo,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer311Root": dataVer311Root,
+ "dataVer311ActiveTasks": dataVer311ActiveTasks,
+ "dataVer311NodeStats": dataVer311NodeStats,
+ "dataVer311NodeSystem": dataVer311NodeSystem,
+ "dataVer311DbsInfo": dataVer311DbsInfo,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestCouchDB_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &CouchDB{}, dataConfigJSON, dataConfigYAML)
}
func TestCouchDB_Init(t *testing.T) {
@@ -79,9 +84,9 @@ func TestCouchDB_Init(t *testing.T) {
es.Config = test.config
if test.wantFail {
- assert.False(t, es.Init())
+ assert.Error(t, es.Init())
} else {
- assert.True(t, es.Init())
+ assert.NoError(t, es.Init())
assert.Equal(t, test.wantNumOfCharts, len(*es.Charts()))
}
})
@@ -105,9 +110,9 @@ func TestCouchDB_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, cdb.Check())
+ assert.Error(t, cdb.Check())
} else {
- assert.True(t, cdb.Check())
+ assert.NoError(t, cdb.Check())
}
})
}
@@ -387,7 +392,7 @@ func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, clea
srv := prepareCouchDBEndpoint()
cdb.URL = srv.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, srv.Close
}
@@ -404,7 +409,7 @@ func prepareCouchDBInvalidData(t *testing.T) (*CouchDB, func()) {
}))
cdb := New()
cdb.URL = srv.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, srv.Close
}
@@ -417,7 +422,7 @@ func prepareCouchDB404(t *testing.T) (*CouchDB, func()) {
}))
cdb := New()
cdb.URL = srv.URL
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, srv.Close
}
@@ -426,7 +431,7 @@ func prepareCouchDBConnectionRefused(t *testing.T) (*CouchDB, func()) {
t.Helper()
cdb := New()
cdb.URL = "http://127.0.0.1:38001"
- require.True(t, cdb.Init())
+ require.NoError(t, cdb.Init())
return cdb, func() {}
}
@@ -436,15 +441,15 @@ func prepareCouchDBEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/_node/_local/_stats":
- _, _ = w.Write(v311NodeStats)
+ _, _ = w.Write(dataVer311NodeStats)
case "/_node/_local/_system":
- _, _ = w.Write(v311NodeSystem)
+ _, _ = w.Write(dataVer311NodeSystem)
case urlPathActiveTasks:
- _, _ = w.Write(v311ActiveTasks)
+ _, _ = w.Write(dataVer311ActiveTasks)
case "/_dbs_info":
- _, _ = w.Write(v311DbsInfo)
+ _, _ = w.Write(dataVer311DbsInfo)
case "/":
- _, _ = w.Write(v311Root)
+ _, _ = w.Write(dataVer311Root)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/couchdb/testdata/config.json b/modules/couchdb/testdata/config.json
new file mode 100644
index 000000000..0fa716e5d
--- /dev/null
+++ b/modules/couchdb/testdata/config.json
@@ -0,0 +1,22 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "node": "ok",
+ "databases": "ok"
+}
diff --git a/modules/couchdb/testdata/config.yaml b/modules/couchdb/testdata/config.yaml
new file mode 100644
index 000000000..a4adf64e8
--- /dev/null
+++ b/modules/couchdb/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+node: "ok"
+databases: "ok"
\ No newline at end of file
diff --git a/modules/dnsdist/config_schema.json b/modules/dnsdist/config_schema.json
index 880190ce2..a55a2dd1d 100644
--- a/modules/dnsdist/config_schema.json
+++ b/modules/dnsdist/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/dnsdist job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DNSDist collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the DNSDist built-in webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1:8083"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/dnsdist/dnsdist.go b/modules/dnsdist/dnsdist.go
index 0af242534..d7c936ce0 100644
--- a/modules/dnsdist/dnsdist.go
+++ b/modules/dnsdist/dnsdist.go
@@ -4,6 +4,7 @@ package dnsdist
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -24,18 +25,6 @@ func init() {
})
}
-type Config struct {
- web.HTTP `yaml:",inline"`
-}
-
-type DNSdist struct {
- module.Base
- Config `yaml:",inline"`
-
- httpClient *http.Client
- charts *module.Charts
-}
-
func New() *DNSdist {
return &DNSdist{
Config: Config{
@@ -44,39 +33,66 @@ func New() *DNSdist {
URL: "http://127.0.0.1:8083",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
}
}
-func (d *DNSdist) Init() bool {
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
+type DNSdist struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ httpClient *http.Client
+}
+
+func (d *DNSdist) Configuration() any {
+ return d.Config
+}
+
+func (d *DNSdist) Init() error {
err := d.validateConfig()
if err != nil {
d.Errorf("config validation: %v", err)
- return false
+ return err
}
client, err := d.initHTTPClient()
if err != nil {
d.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
d.httpClient = client
cs, err := d.initCharts()
if err != nil {
d.Errorf("init charts: %v", err)
- return false
+ return err
}
d.charts = cs
- return true
+ return nil
}
-func (d *DNSdist) Check() bool {
- return len(d.Collect()) > 0
+func (d *DNSdist) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+	if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (d *DNSdist) Charts() *module.Charts {
@@ -100,6 +116,5 @@ func (d *DNSdist) Cleanup() {
if d.httpClient == nil {
return
}
-
d.httpClient.CloseIdleConnections()
}
diff --git a/modules/dnsdist/dnsdist_test.go b/modules/dnsdist/dnsdist_test.go
index 851d99016..845f4326b 100644
--- a/modules/dnsdist/dnsdist_test.go
+++ b/modules/dnsdist/dnsdist_test.go
@@ -3,6 +3,7 @@
package dnsdist
import (
+ "github.com/netdata/go.d.plugin/agent/module"
"net/http"
"net/http/httptest"
"os"
@@ -16,22 +17,27 @@ import (
)
var (
- v151JSONStat, _ = os.ReadFile("testdata/v1.5.1/jsonstat.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer151JSONStat, _ = os.ReadFile("testdata/v1.5.1/jsonstat.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v151JSONStat": v151JSONStat,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer151JSONStat": dataVer151JSONStat,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*DNSdist)(nil), New())
+func TestDNSdist_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DNSdist{}, dataConfigJSON, dataConfigYAML)
}
-func Test_Init(t *testing.T) {
+func TestDNSdist_Init(t *testing.T) {
tests := map[string]struct {
config Config
wantFail bool
@@ -68,25 +74,25 @@ func Test_Init(t *testing.T) {
ns.Config = test.config
if test.wantFail {
- assert.False(t, ns.Init())
+ assert.Error(t, ns.Init())
} else {
- assert.True(t, ns.Init())
+ assert.NoError(t, ns.Init())
}
})
}
}
-func Test_Charts(t *testing.T) {
+func TestDNSdist_Charts(t *testing.T) {
dist := New()
- require.True(t, dist.Init())
+ require.NoError(t, dist.Init())
assert.NotNil(t, dist.Charts())
}
-func Test_Cleanup(t *testing.T) {
+func TestDNSdist_Cleanup(t *testing.T) {
assert.NotPanics(t, New().Cleanup)
}
-func Test_Check(t *testing.T) {
+func TestDNSdist_Check(t *testing.T) {
tests := map[string]struct {
prepare func() (dist *DNSdist, cleanup func())
wantFail bool
@@ -113,18 +119,18 @@ func Test_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
dist, cleanup := test.prepare()
defer cleanup()
- require.True(t, dist.Init())
+ require.NoError(t, dist.Init())
if test.wantFail {
- assert.False(t, dist.Check())
+ assert.Error(t, dist.Check())
} else {
- assert.True(t, dist.Check())
+ assert.NoError(t, dist.Check())
}
})
}
}
-func Test_Collect(t *testing.T) {
+func TestDNSdist_Collect(t *testing.T) {
tests := map[string]struct {
prepare func() (dist *DNSdist, cleanup func())
wantCollected map[string]int64
@@ -181,7 +187,7 @@ func Test_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
dist, cleanup := test.prepare()
defer cleanup()
- require.True(t, dist.Init())
+ require.NoError(t, dist.Init())
collected := dist.Collect()
@@ -251,7 +257,7 @@ func preparePowerDNSDistEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.String() {
case "/jsonstat?command=stats":
- _, _ = w.Write(v151JSONStat)
+ _, _ = w.Write(dataVer151JSONStat)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/dnsdist/init.go b/modules/dnsdist/init.go
index d58891681..41c92edc6 100644
--- a/modules/dnsdist/init.go
+++ b/modules/dnsdist/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (d DNSdist) validateConfig() error {
+func (d *DNSdist) validateConfig() error {
if d.URL == "" {
return errors.New("URL not set")
}
@@ -22,10 +22,10 @@ func (d DNSdist) validateConfig() error {
return nil
}
-func (d DNSdist) initHTTPClient() (*http.Client, error) {
+func (d *DNSdist) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(d.Client)
}
-func (d DNSdist) initCharts() (*module.Charts, error) {
+func (d *DNSdist) initCharts() (*module.Charts, error) {
return charts.Copy(), nil
}
diff --git a/modules/dnsdist/testdata/config.json b/modules/dnsdist/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/dnsdist/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/dnsdist/testdata/config.yaml b/modules/dnsdist/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/dnsdist/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/dnsmasq/config_schema.json b/modules/dnsmasq/config_schema.json
index d08819917..e80b7b5c2 100644
--- a/modules/dnsmasq/config_schema.json
+++ b/modules/dnsmasq/config_schema.json
@@ -1,26 +1,48 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/dnsmasq job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Dnsmasq collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "Dnsmasq address. The format is IP:PORT.",
+ "type": "string",
+ "default": "127.0.0.1:53"
+ },
+ "protocol": {
+ "title": "Protocol",
+ "description": "DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls.",
+ "type": "string",
+ "enum": [
+ "udp",
+ "tcp",
+ "tcp-tls"
+ ],
+ "default": "udp"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection timeout in seconds.",
+ "type": "number",
+ "default": 1
+ }
},
- "protocol": {
- "type": "string"
- },
- "address": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- }
+ "required": [
+ "address",
+ "protocol"
+ ]
},
- "required": [
- "name",
- "address"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
}
diff --git a/modules/dnsmasq/dnsmasq.go b/modules/dnsmasq/dnsmasq.go
index 33e252b09..3317251af 100644
--- a/modules/dnsmasq/dnsmasq.go
+++ b/modules/dnsmasq/dnsmasq.go
@@ -4,6 +4,7 @@ package dnsmasq
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -27,7 +28,7 @@ func New() *Dnsmasq {
Config: Config{
Protocol: "udp",
Address: "127.0.0.1:53",
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
newDNSClient: func(network string, timeout time.Duration) dnsClient {
@@ -40,53 +41,66 @@ func New() *Dnsmasq {
}
type Config struct {
- Protocol string `yaml:"protocol"`
- Address string `yaml:"address"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Protocol string `yaml:"protocol" json:"protocol"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type (
Dnsmasq struct {
module.Base
- Config `yaml:",inline"`
-
- newDNSClient func(network string, timeout time.Duration) dnsClient
- dnsClient dnsClient
+ Config `yaml:",inline" json:""`
charts *module.Charts
- }
+ dnsClient dnsClient
+ newDNSClient func(network string, timeout time.Duration) dnsClient
+ }
dnsClient interface {
Exchange(msg *dns.Msg, address string) (resp *dns.Msg, rtt time.Duration, err error)
}
)
-func (d *Dnsmasq) Init() bool {
+func (d *Dnsmasq) Configuration() any {
+ return d.Config
+}
+
+func (d *Dnsmasq) Init() error {
err := d.validateConfig()
if err != nil {
d.Errorf("config validation: %v", err)
- return false
+ return err
}
client, err := d.initDNSClient()
if err != nil {
d.Errorf("init DNS client: %v", err)
- return false
+ return err
}
d.dnsClient = client
charts, err := d.initCharts()
if err != nil {
d.Errorf("init charts: %v", err)
- return false
+ return err
}
d.charts = charts
- return true
+ return nil
}
-func (d *Dnsmasq) Check() bool {
- return len(d.Collect()) > 0
+func (d *Dnsmasq) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+	if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (d *Dnsmasq) Charts() *module.Charts {
@@ -105,4 +119,4 @@ func (d *Dnsmasq) Collect() map[string]int64 {
return ms
}
-func (Dnsmasq) Cleanup() {}
+func (d *Dnsmasq) Cleanup() {}
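
Dnsmasq keeps a `newDNSClient` factory next to the `dnsClient` interface so tests can substitute a fake exchanger instead of dialing a real server. A sketch of that seam, using only the miekg/dns API visible in the hunk; `fakeClient` and the query name are illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

// dnsClient matches the interface in the hunk above.
type dnsClient interface {
	Exchange(msg *dns.Msg, address string) (*dns.Msg, time.Duration, error)
}

// fakeClient is a hypothetical test double: it answers every query itself.
type fakeClient struct{}

func (fakeClient) Exchange(msg *dns.Msg, _ string) (*dns.Msg, time.Duration, error) {
	resp := new(dns.Msg)
	resp.SetReply(msg) // echo a successful reply back to the caller
	return resp, time.Millisecond, nil
}

func main() {
	// Tests would assign this factory in place of the production one,
	// which would return something like &dns.Client{Net: network, Timeout: timeout}.
	newDNSClient := func(network string, timeout time.Duration) dnsClient {
		return fakeClient{}
	}

	c := newDNSClient("udp", time.Second)
	msg := new(dns.Msg)
	msg.SetQuestion("cachesize.bind.", dns.TypeTXT) // e.g., a dnsmasq stats name
	resp, rtt, err := c.Exchange(msg, "127.0.0.1:53")
	fmt.Println(resp.Rcode == dns.RcodeSuccess, rtt, err) // true 1ms <nil>
}
```
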
diff --git a/modules/dnsmasq/dnsmasq_test.go b/modules/dnsmasq/dnsmasq_test.go
index b4f0bb555..647de16d9 100644
--- a/modules/dnsmasq/dnsmasq_test.go
+++ b/modules/dnsmasq/dnsmasq_test.go
@@ -5,16 +5,33 @@ package dnsmasq
import (
"errors"
"fmt"
+ "os"
"testing"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/miekg/dns"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Dnsmasq)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDnsmasq_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Dnsmasq{}, dataConfigJSON, dataConfigYAML)
}
func TestDnsmasq_Init(t *testing.T) {
@@ -54,9 +71,9 @@ func TestDnsmasq_Init(t *testing.T) {
ns.Config = test.config
if test.wantFail {
- assert.False(t, ns.Init())
+ assert.Error(t, ns.Init())
} else {
- assert.True(t, ns.Init())
+ assert.NoError(t, ns.Init())
}
})
}
@@ -83,12 +100,12 @@ func TestDnsmasq_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
dnsmasq := test.prepare()
- require.True(t, dnsmasq.Init())
+ require.NoError(t, dnsmasq.Init())
if test.wantFail {
- assert.False(t, dnsmasq.Check())
+ assert.Error(t, dnsmasq.Check())
} else {
- assert.True(t, dnsmasq.Check())
+ assert.NoError(t, dnsmasq.Check())
}
})
}
@@ -96,7 +113,7 @@ func TestDnsmasq_Check(t *testing.T) {
func TestDnsmasq_Charts(t *testing.T) {
dnsmasq := New()
- require.True(t, dnsmasq.Init())
+ require.NoError(t, dnsmasq.Init())
assert.NotNil(t, dnsmasq.Charts())
}
@@ -133,7 +150,7 @@ func TestDnsmasq_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
dnsmasq := test.prepare()
- require.True(t, dnsmasq.Init())
+ require.NoError(t, dnsmasq.Init())
collected := dnsmasq.Collect()
diff --git a/modules/dnsmasq/init.go b/modules/dnsmasq/init.go
index 2ce4790ae..9ceb3ead5 100644
--- a/modules/dnsmasq/init.go
+++ b/modules/dnsmasq/init.go
@@ -9,7 +9,7 @@ import (
"github.com/netdata/go.d.plugin/agent/module"
)
-func (d Dnsmasq) validateConfig() error {
+func (d *Dnsmasq) validateConfig() error {
if d.Address == "" {
return errors.New("'address' parameter not set")
}
@@ -19,11 +19,11 @@ func (d Dnsmasq) validateConfig() error {
return nil
}
-func (d Dnsmasq) initDNSClient() (dnsClient, error) {
- return d.newDNSClient(d.Protocol, d.Timeout.Duration), nil
+func (d *Dnsmasq) initDNSClient() (dnsClient, error) {
+ return d.newDNSClient(d.Protocol, d.Timeout.Duration()), nil
}
-func (d Dnsmasq) initCharts() (*module.Charts, error) {
+func (d *Dnsmasq) initCharts() (*module.Charts, error) {
return cacheCharts.Copy(), nil
}
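
Construction changes from `web.Duration{Duration: time.Second}` to the conversion `web.Duration(time.Second)`, and reads now go through a `Duration()` method. That implies `web.Duration` is a defined type over `time.Duration` rather than a struct wrapper. A sketch of the shape these call sites rely on; the real type also needs JSON/YAML (un)marshalers, which are not shown here:

```go
package web

import "time"

// Duration as the call sites imply: a named time.Duration, constructed by
// conversion and read back via Duration(). Sketch only.
type Duration time.Duration

func (d Duration) Duration() time.Duration { return time.Duration(d) }
```
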
diff --git a/modules/dnsmasq/testdata/config.json b/modules/dnsmasq/testdata/config.json
new file mode 100644
index 000000000..4fff563b8
--- /dev/null
+++ b/modules/dnsmasq/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "protocol": "ok",
+ "address": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/dnsmasq/testdata/config.yaml b/modules/dnsmasq/testdata/config.yaml
new file mode 100644
index 000000000..1a79b8773
--- /dev/null
+++ b/modules/dnsmasq/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+protocol: "ok"
+address: "ok"
+timeout: 123.123
diff --git a/modules/dnsmasq_dhcp/config_schema.json b/modules/dnsmasq_dhcp/config_schema.json
index bb9d76813..e4143f889 100644
--- a/modules/dnsmasq_dhcp/config_schema.json
+++ b/modules/dnsmasq_dhcp/config_schema.json
@@ -1,23 +1,42 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/dnsmasq_dhcp job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Dnsmasq DHCP collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "leases_path": {
+ "title": "Leases path",
+ "description": "Path to Dnsmasq DHCP leases file.",
+ "type": "string",
+ "default": "/var/lib/misc/dnsmasq.leases"
+ },
+ "conf_path": {
+ "title": "Config path",
+ "description": "Path to Dnsmasq configuration file.",
+ "type": "string",
+ "default": "/etc/dnsmasq.conf"
+ },
+ "conf_dir": {
+ "title": "Config directory path",
+ "description": "Path to Dnsmasq configuration directory.",
+ "type": "string",
+ "default": "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new"
+ }
},
- "leases_path": {
- "type": "string"
- },
- "conf_path": {
- "type": "string"
- },
- "conf_dir": {
- "type": "string"
- }
+ "required": [
+ "leases_path"
+ ]
},
- "required": [
- "name",
- "leases_path"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
}
diff --git a/modules/dnsmasq_dhcp/dhcp.go b/modules/dnsmasq_dhcp/dhcp.go
index ede8a8ee8..59b058812 100644
--- a/modules/dnsmasq_dhcp/dhcp.go
+++ b/modules/dnsmasq_dhcp/dhcp.go
@@ -4,6 +4,7 @@ package dnsmasq_dhcp
import (
_ "embed"
+ "errors"
"net"
"time"
@@ -22,15 +23,13 @@ func init() {
}
func New() *DnsmasqDHCP {
- config := Config{
- // debian defaults
- LeasesPath: "/var/lib/misc/dnsmasq.leases",
- ConfPath: "/etc/dnsmasq.conf",
- ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new",
- }
-
return &DnsmasqDHCP{
- Config: config,
+ Config: Config{
+ // debian defaults
+ LeasesPath: "/var/lib/misc/dnsmasq.leases",
+ ConfPath: "/etc/dnsmasq.conf",
+ ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new",
+ },
charts: charts.Copy(),
parseConfigEvery: time.Minute,
cacheDHCPRanges: make(map[string]bool),
@@ -39,45 +38,56 @@ func New() *DnsmasqDHCP {
}
type Config struct {
- LeasesPath string `yaml:"leases_path"`
- ConfPath string `yaml:"conf_path"`
- ConfDir string `yaml:"conf_dir"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ LeasesPath string `yaml:"leases_path" json:"leases_path"`
+ ConfPath string `yaml:"conf_path" json:"conf_path"`
+ ConfDir string `yaml:"conf_dir" json:"conf_dir"`
}
type DnsmasqDHCP struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- leasesModTime time.Time
-
+ leasesModTime time.Time
parseConfigTime time.Time
parseConfigEvery time.Duration
-
- dhcpRanges []iprange.Range
- dhcpHosts []net.IP
-
- cacheDHCPRanges map[string]bool
+ dhcpRanges []iprange.Range
+ dhcpHosts []net.IP
+ cacheDHCPRanges map[string]bool
mx map[string]int64
}
-func (d *DnsmasqDHCP) Init() bool {
+func (d *DnsmasqDHCP) Configuration() any {
+ return d.Config
+}
+
+func (d *DnsmasqDHCP) Init() error {
if err := d.validateConfig(); err != nil {
d.Errorf("config validation: %v", err)
- return false
+ return err
}
if err := d.checkLeasesPath(); err != nil {
d.Errorf("leases path check: %v", err)
- return false
+ return err
}
- return true
+ return nil
}
-func (d *DnsmasqDHCP) Check() bool {
- return len(d.Collect()) > 0
+func (d *DnsmasqDHCP) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+	if len(mx) == 0 {
+		return errors.New("no metrics collected")
+	}
+ return nil
}
func (d *DnsmasqDHCP) Charts() *module.Charts {
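
The DHCP collector stores parsed `dhcp-range` entries as `iprange.Range` values and attributes leases to them. A hedged sketch of that membership test, assuming `pkg/iprange` exposes a `ParseRange` function and a `Contains` method (the exact signatures are not shown in this diff):

```go
package main

import (
	"fmt"
	"net"

	"github.com/netdata/go.d.plugin/pkg/iprange"
)

func main() {
	// Assumed API: ParseRange turns "start-end" into an iprange.Range.
	r, err := iprange.ParseRange("192.168.0.100-192.168.0.200")
	if err != nil {
		panic(err)
	}
	// Contains is how a lease IP would be mapped onto a dhcp-range.
	fmt.Println(r.Contains(net.ParseIP("192.168.0.150"))) // true
	fmt.Println(r.Contains(net.ParseIP("192.168.1.1")))   // false
}
```
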
diff --git a/modules/dnsmasq_dhcp/dhcp_test.go b/modules/dnsmasq_dhcp/dhcp_test.go
index 9e7693fa9..e39d937ba 100644
--- a/modules/dnsmasq_dhcp/dhcp_test.go
+++ b/modules/dnsmasq_dhcp/dhcp_test.go
@@ -3,22 +3,37 @@
package dnsmasq_dhcp
import (
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
const (
testLeasesPath = "testdata/dnsmasq.leases"
testConfPath = "testdata/dnsmasq.conf"
testConfDir = "testdata/dnsmasq.d"
)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.IsType(t, (*DnsmasqDHCP)(nil), job)
+func TestDnsmasqDHCP_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DnsmasqDHCP{}, dataConfigJSON, dataConfigYAML)
}
func TestDnsmasqDHCP_Init(t *testing.T) {
@@ -27,14 +42,14 @@ func TestDnsmasqDHCP_Init(t *testing.T) {
job.ConfPath = testConfPath
job.ConfDir = testConfDir
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
}
func TestDnsmasqDHCP_InitEmptyLeasesPath(t *testing.T) {
job := New()
job.LeasesPath = ""
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) {
@@ -42,7 +57,7 @@ func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) {
job.LeasesPath = testLeasesPath
job.LeasesPath += "!"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) {
@@ -51,7 +66,7 @@ func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) {
job.ConfPath = "testdata/dnsmasq3.conf"
job.ConfDir = ""
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
}
func TestDnsmasqDHCP_Check(t *testing.T) {
@@ -60,8 +75,8 @@ func TestDnsmasqDHCP_Check(t *testing.T) {
job.ConfPath = testConfPath
job.ConfDir = testConfDir
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestDnsmasqDHCP_Charts(t *testing.T) {
@@ -70,7 +85,7 @@ func TestDnsmasqDHCP_Charts(t *testing.T) {
job.ConfPath = testConfPath
job.ConfDir = testConfDir
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.NotNil(t, job.Charts())
}
@@ -85,8 +100,8 @@ func TestDnsmasqDHCP_Collect(t *testing.T) {
job.ConfPath = testConfPath
job.ConfDir = testConfDir
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"dhcp_range_1230::1-1230::64_allocated_leases": 7,
@@ -126,8 +141,8 @@ func TestDnsmasqDHCP_CollectFailedToOpenLeasesPath(t *testing.T) {
job.ConfPath = testConfPath
job.ConfDir = testConfDir
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
job.LeasesPath = ""
assert.Nil(t, job.Collect())
diff --git a/modules/dnsmasq_dhcp/testdata/config.json b/modules/dnsmasq_dhcp/testdata/config.json
new file mode 100644
index 000000000..6df6faec6
--- /dev/null
+++ b/modules/dnsmasq_dhcp/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "leases_path": "ok",
+ "conf_path": "ok",
+ "conf_dir": "ok"
+}
diff --git a/modules/dnsmasq_dhcp/testdata/config.yaml b/modules/dnsmasq_dhcp/testdata/config.yaml
new file mode 100644
index 000000000..4944cf3a6
--- /dev/null
+++ b/modules/dnsmasq_dhcp/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+leases_path: "ok"
+conf_path: "ok"
+conf_dir: "ok"
+
diff --git a/modules/dnsquery/collect.go b/modules/dnsquery/collect.go
index 46104e944..a98e37cad 100644
--- a/modules/dnsquery/collect.go
+++ b/modules/dnsquery/collect.go
@@ -14,7 +14,7 @@ import (
func (d *DNSQuery) collect() (map[string]int64, error) {
if d.dnsClient == nil {
- d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration)
+ d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration())
}
mx := make(map[string]int64)
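
Note that this collector builds its DNS client lazily on first `collect` rather than in `Init`, which keeps the `newDNSClient` factory swappable until the last moment in tests. The nil-guarded pattern, extracted as a hypothetical helper (not part of the patch):

```go
// client is a hypothetical refactor of the nil check above: construct the
// DNS client on first use, reuse it afterwards.
func (d *DNSQuery) client() dnsClient {
	if d.dnsClient == nil {
		d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration())
	}
	return d.dnsClient
}
```
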
diff --git a/modules/dnsquery/config_schema.json b/modules/dnsquery/config_schema.json
index 4a7fa412a..3b702e6b0 100644
--- a/modules/dnsquery/config_schema.json
+++ b/modules/dnsquery/config_schema.json
@@ -1,48 +1,104 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/dns_query job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "domains": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "servers": {
- "type": "array",
- "items": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DNS query collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "DNS query timeout.",
+ "type": "number",
+ "default": 2
+ },
+ "servers": {
+ "title": "Servers",
+ "description": "List of DNS servers that will be queried.",
+ "type": "array",
+ "items": {
+ "title": "DNS server",
+ "description": "IP address or hostname of the DNS server.",
+ "type": "string"
+ },
+ "default": [
+ "8.8.8.8"
+ ],
+ "uniqueItems": true
+ },
+ "network": {
+ "title": "Protocol",
+ "description": "Network protocol. Supported protocols: udp, tcp, tcp-tls.",
+ "type": "string",
+ "enum": [
+ "udp",
+ "tcp",
+ "tcp-tls"
+ ],
+ "default": "udp"
+ },
+ "port": {
+ "title": "Port",
+ "description": "DNS server port.",
+ "type": "integer",
+ "default": 53
+ },
+ "domains": {
+ "title": "Domains",
+ "description": "Domain or subdomains to query. At each iteration, a random domain will be selected from the list.",
+ "type": "array",
+ "items": {
+ "title": "Domain",
+ "type": "string"
+ },
+ "default": [
+ "google.com",
+ "github.com"
+ ],
+ "uniqueItems": true
+ },
+ "record_types": {
+ "type": "array",
+ "items": {
+ "type": "string",
+ "enum": [
+ "A",
+ "AAAA",
+ "CNAME",
+ "MX",
+ "NS",
+ "PTR",
+ "TXT",
+ "SOA",
+ "SPF",
+ "TXT",
+ "SRV"
+ ],
+ "default": "A"
+ },
+ "default": [
+ "A"
+ ],
+ "uniqueItems": true
}
},
- "network": {
- "type": "string"
- },
- "record_type": {
- "type": "string"
- },
- "record_types": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "port": {
- "type": "integer"
+ "required": [
+ "domains",
+ "servers",
+ "network"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "domains",
- "servers"
- ]
+ }
}
diff --git a/modules/dnsquery/dnsquery.go b/modules/dnsquery/dnsquery.go
index dd1cd3c66..107916b26 100644
--- a/modules/dnsquery/dnsquery.go
+++ b/modules/dnsquery/dnsquery.go
@@ -28,7 +28,7 @@ func init() {
func New() *DNSQuery {
return &DNSQuery{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
Network: "udp",
RecordTypes: []string{"A"},
Port: 53,
@@ -43,59 +43,62 @@ func New() *DNSQuery {
}
type Config struct {
- Domains []string `yaml:"domains"`
- Servers []string `yaml:"servers"`
- Network string `yaml:"network"`
- RecordType string `yaml:"record_type"`
- RecordTypes []string `yaml:"record_types"`
- Port int `yaml:"port"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Domains []string `yaml:"domains" json:"domains"`
+ Servers []string `yaml:"servers" json:"servers"`
+ Network string `yaml:"network" json:"network"`
+ RecordType string `yaml:"record_type" json:"record_type"`
+ RecordTypes []string `yaml:"record_types" json:"record_types"`
+ Port int `yaml:"port" json:"port"`
}
type (
DNSQuery struct {
module.Base
-
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
+ dnsClient dnsClient
newDNSClient func(network string, duration time.Duration) dnsClient
- recordTypes map[string]uint16
- dnsClient dnsClient
+ recordTypes map[string]uint16
}
-
dnsClient interface {
Exchange(msg *dns.Msg, address string) (response *dns.Msg, rtt time.Duration, err error)
}
)
-func (d *DNSQuery) Init() bool {
+func (d *DNSQuery) Configuration() any {
+ return d.Config
+}
+
+func (d *DNSQuery) Init() error {
if err := d.verifyConfig(); err != nil {
d.Errorf("config validation: %v", err)
- return false
+ return err
}
rt, err := d.initRecordTypes()
if err != nil {
d.Errorf("init record type: %v", err)
- return false
+ return err
}
d.recordTypes = rt
charts, err := d.initCharts()
if err != nil {
d.Errorf("init charts: %v", err)
- return false
+ return err
}
d.charts = charts
- return true
+ return nil
}
-func (d *DNSQuery) Check() bool {
- return true
+func (d *DNSQuery) Check() error {
+ return nil
}
func (d *DNSQuery) Charts() *module.Charts {
diff --git a/modules/dnsquery/dnsquery_test.go b/modules/dnsquery/dnsquery_test.go
index 5ba841731..c454d98ec 100644
--- a/modules/dnsquery/dnsquery_test.go
+++ b/modules/dnsquery/dnsquery_test.go
@@ -4,6 +4,7 @@ package dnsquery
import (
"errors"
+ "os"
"testing"
"time"
@@ -15,8 +16,22 @@ import (
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDNSQuery_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DNSQuery{}, dataConfigJSON, dataConfigYAML)
}
func TestDNSQuery_Init(t *testing.T) {
@@ -32,7 +47,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "udp",
RecordTypes: []string{"A"},
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
"success when using deprecated record_type": {
@@ -43,7 +58,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "udp",
RecordType: "A",
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
"fail with default": {
@@ -58,7 +73,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "udp",
RecordTypes: []string{"A"},
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
"fail when servers not set": {
@@ -69,7 +84,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "udp",
RecordTypes: []string{"A"},
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
"fail when network is invalid": {
@@ -80,7 +95,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "gcp",
RecordTypes: []string{"A"},
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
"fail when record_type is invalid": {
@@ -91,7 +106,7 @@ func TestDNSQuery_Init(t *testing.T) {
Network: "udp",
RecordTypes: []string{"B"},
Port: 53,
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
}
@@ -102,9 +117,9 @@ func TestDNSQuery_Init(t *testing.T) {
dq.Config = test.config
if test.wantFail {
- assert.False(t, dq.Init())
+ assert.Error(t, dq.Init())
} else {
- assert.True(t, dq.Init())
+ assert.NoError(t, dq.Init())
}
})
}
@@ -129,12 +144,12 @@ func TestDNSQuery_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
dq := test.prepare()
- require.True(t, dq.Init())
+ require.NoError(t, dq.Init())
if test.wantFail {
- assert.False(t, dq.Check())
+ assert.Error(t, dq.Check())
} else {
- assert.True(t, dq.Check())
+ assert.NoError(t, dq.Check())
}
})
}
@@ -145,7 +160,7 @@ func TestDNSQuery_Charts(t *testing.T) {
dq.Domains = []string{"google.com"}
dq.Servers = []string{"192.0.2.0", "192.0.2.1"}
- require.True(t, dq.Init())
+ require.NoError(t, dq.Init())
assert.NotNil(t, dq.Charts())
assert.Len(t, *dq.Charts(), len(dnsChartsTmpl)*len(dq.Servers))
@@ -186,7 +201,7 @@ func TestDNSQuery_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
dq := test.prepare()
- require.True(t, dq.Init())
+ require.NoError(t, dq.Init())
mx := dq.Collect()
diff --git a/modules/dnsquery/testdata/config.json b/modules/dnsquery/testdata/config.json
new file mode 100644
index 000000000..b16ed18c6
--- /dev/null
+++ b/modules/dnsquery/testdata/config.json
@@ -0,0 +1,16 @@
+{
+ "update_every": 123,
+ "domains": [
+ "ok"
+ ],
+ "servers": [
+ "ok"
+ ],
+ "network": "ok",
+ "record_type": "ok",
+ "record_types": [
+ "ok"
+ ],
+ "port": 123,
+ "timeout": 123.123
+}
diff --git a/modules/dnsquery/testdata/config.yaml b/modules/dnsquery/testdata/config.yaml
new file mode 100644
index 000000000..fdda7faed
--- /dev/null
+++ b/modules/dnsquery/testdata/config.yaml
@@ -0,0 +1,12 @@
+update_every: 123
+domains:
+ - "ok"
+servers:
+ - "ok"
+network: "ok"
+record_type: "ok"
+record_types:
+ - "ok"
+port: 123
+timeout: 123.123
+
diff --git a/modules/docker/collect.go b/modules/docker/collect.go
index ceda40671..fe4b6b45e 100644
--- a/modules/docker/collect.go
+++ b/modules/docker/collect.go
@@ -43,7 +43,7 @@ func (d *Docker) collect() (map[string]int64, error) {
}
func (d *Docker) collectInfo(mx map[string]int64) error {
- ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
defer cancel()
info, err := d.client.Info(ctx)
@@ -59,7 +59,7 @@ func (d *Docker) collectInfo(mx map[string]int64) error {
}
func (d *Docker) collectImages(mx map[string]int64) error {
- ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
defer cancel()
images, err := d.client.ImageList(ctx, types.ImageListOptions{})
@@ -106,7 +106,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error {
for _, status := range containerHealthStatuses {
if err := func() error {
- ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
defer cancel()
v, err := d.client.ContainerList(ctx, types.ContainerListOptions{
@@ -191,7 +191,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error {
}
func (d *Docker) negotiateAPIVersion() {
- ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration())
defer cancel()
d.client.NegotiateAPIVersion(ctx)
diff --git a/modules/docker/config_schema.json b/modules/docker/config_schema.json
index b060da819..ead4e18e6 100644
--- a/modules/docker/config_schema.json
+++ b/modules/docker/config_schema.json
@@ -1,26 +1,45 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/docker job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Docker collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "Docker daemon's listening address. When using a TCP socket, the format is: tcp://{ip}:{port}.",
+ "type": "string",
+ "default": "unix:///var/run/docker.sock"
+ },
+ "timeout": {
+ "title": "Timeout",
+        "description": "Connection timeout, in seconds.",
+ "type": "number",
+ "default": 2
+ },
+ "collect_container_size": {
+ "title": "Collect container size",
+ "description": "Collect container writable layer size.",
+ "type": "boolean",
+ "default": false
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "collect_container_size": {
- "type": "boolean"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/docker/docker.go b/modules/docker/docker.go
index 1078de2fb..af6851459 100644
--- a/modules/docker/docker.go
+++ b/modules/docker/docker.go
@@ -5,6 +5,7 @@ package docker
import (
"context"
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -28,7 +29,7 @@ func New() *Docker {
return &Docker{
Config: Config{
Address: docker.DefaultDockerHost,
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 2),
CollectContainerSize: false,
},
@@ -41,23 +42,24 @@ func New() *Docker {
}
type Config struct {
- Timeout web.Duration `yaml:"timeout"`
- Address string `yaml:"address"`
- CollectContainerSize bool `yaml:"collect_container_size"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ CollectContainerSize bool `yaml:"collect_container_size" json:"collect_container_size"`
}
type (
Docker struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- newClient func(Config) (dockerClient, error)
- client dockerClient
- verNegotiated bool
+ client dockerClient
+ newClient func(Config) (dockerClient, error)
- containers map[string]bool
+ verNegotiated bool
+ containers map[string]bool
}
dockerClient interface {
NegotiateAPIVersion(context.Context)
@@ -68,12 +70,25 @@ type (
}
)
-func (d *Docker) Init() bool {
- return true
+func (d *Docker) Configuration() any {
+ return d.Config
+}
+
+func (d *Docker) Init() error {
+ return nil
}
-func (d *Docker) Check() bool {
- return len(d.Collect()) > 0
+func (d *Docker) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (d *Docker) Charts() *module.Charts {
diff --git a/modules/docker/docker_test.go b/modules/docker/docker_test.go
index 0a3711b4d..03fe06d2c 100644
--- a/modules/docker/docker_test.go
+++ b/modules/docker/docker_test.go
@@ -5,13 +5,34 @@ package docker
import (
"context"
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/docker/docker/api/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDocker_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Docker{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestDocker_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -35,9 +56,9 @@ func TestDocker_Init(t *testing.T) {
d.Config = test.config
if test.wantFail {
- assert.False(t, d.Init())
+ assert.Error(t, d.Init())
} else {
- assert.True(t, d.Init())
+ assert.NoError(t, d.Init())
}
})
}
@@ -58,15 +79,15 @@ func TestDocker_Cleanup(t *testing.T) {
},
"after Init": {
wantClose: false,
- prepare: func(d *Docker) { d.Init() },
+ prepare: func(d *Docker) { _ = d.Init() },
},
"after Check": {
wantClose: true,
- prepare: func(d *Docker) { d.Init(); d.Check() },
+ prepare: func(d *Docker) { _ = d.Init(); _ = d.Check() },
},
"after Collect": {
wantClose: true,
- prepare: func(d *Docker) { d.Init(); d.Collect() },
+ prepare: func(d *Docker) { _ = d.Init(); d.Collect() },
},
}
@@ -136,12 +157,12 @@ func TestDocker_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
d := test.prepare()
- require.True(t, d.Init())
+ require.NoError(t, d.Init())
if test.wantFail {
- assert.False(t, d.Check())
+ assert.Error(t, d.Check())
} else {
- assert.True(t, d.Check())
+ assert.NoError(t, d.Check())
}
})
}
@@ -666,7 +687,7 @@ func TestDocker_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
d := test.prepare()
- require.True(t, d.Init())
+ require.NoError(t, d.Init())
mx := d.Collect()
diff --git a/modules/docker/metadata.yaml b/modules/docker/metadata.yaml
index 408e84a45..8fc6853a9 100644
--- a/modules/docker/metadata.yaml
+++ b/modules/docker/metadata.yaml
@@ -71,7 +71,7 @@ modules:
required: true
- name: timeout
description: Request timeout in seconds.
- default_value: 1
+ default_value: 2
required: false
- name: collect_container_size
description: Whether to collect container writable layer size.
diff --git a/modules/docker/testdata/config.json b/modules/docker/testdata/config.json
new file mode 100644
index 000000000..5e687448c
--- /dev/null
+++ b/modules/docker/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "collect_container_size": true
+}
diff --git a/modules/docker/testdata/config.yaml b/modules/docker/testdata/config.yaml
new file mode 100644
index 000000000..2b0f32225
--- /dev/null
+++ b/modules/docker/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+collect_container_size: yes
diff --git a/modules/docker_engine/config_schema.json b/modules/docker_engine/config_schema.json
index 2b8505610..3089779a3 100644
--- a/modules/docker_engine/config_schema.json
+++ b/modules/docker_engine/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/docker_engine job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Docker Engine collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Docker Engine metrics page to monitor.",
+ "type": "string",
+ "default": "http://127.0.0.1:9323/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/docker_engine/docker_engine.go b/modules/docker_engine/docker_engine.go
index 7c69daa29..6d5297ea4 100644
--- a/modules/docker_engine/docker_engine.go
+++ b/modules/docker_engine/docker_engine.go
@@ -7,10 +7,9 @@ import (
"errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,69 +23,69 @@ func init() {
}
func New() *DockerEngine {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:9323/metrics",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &DockerEngine{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:9323/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
}
- return &DockerEngine{
- Config: config,
- }
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- }
- DockerEngine struct {
- module.Base
- Config `yaml:",inline"`
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
- prom prometheus.Prometheus
- isSwarmManager bool
- hasContainerStates bool
- }
-)
+type DockerEngine struct {
+ module.Base
+ Config `yaml:",inline" json:""`
-func (de DockerEngine) validateConfig() error {
- if de.URL == "" {
- return errors.New("URL is not set")
- }
- return nil
+ prom prometheus.Prometheus
+
+ isSwarmManager bool
+ hasContainerStates bool
}
-func (de *DockerEngine) initClient() error {
- client, err := web.NewHTTPClient(de.Client)
+func (de *DockerEngine) Configuration() any {
+ return de.Config
+}
+
+func (de *DockerEngine) Init() error {
+ if err := de.validateConfig(); err != nil {
+ de.Errorf("config validation: %v", err)
+ return err
+ }
+
+ prom, err := de.initPrometheusClient()
if err != nil {
+ de.Error(err)
return err
}
+ de.prom = prom
- de.prom = prometheus.New(client, de.Request)
return nil
}
-func (de *DockerEngine) Init() bool {
- if err := de.validateConfig(); err != nil {
- de.Errorf("config validation: %v", err)
- return false
- }
- if err := de.initClient(); err != nil {
- de.Errorf("client initialization: %v", err)
- return false
+func (de *DockerEngine) Check() error {
+ mx, err := de.collect()
+ if err != nil {
+ de.Error(err)
+ return err
}
- return true
-}
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
-func (de *DockerEngine) Check() bool {
- return len(de.Collect()) > 0
+ }
+ return nil
}
-func (de DockerEngine) Charts() *Charts {
+func (de *DockerEngine) Charts() *Charts {
cs := charts.Copy()
if !de.hasContainerStates {
if err := cs.Remove("engine_daemon_container_states_containers"); err != nil {
@@ -101,6 +100,7 @@ func (de DockerEngine) Charts() *Charts {
if err := cs.Add(*swarmManagerCharts.Copy()...); err != nil {
de.Warning(err)
}
+
return cs
}
@@ -117,4 +117,8 @@ func (de *DockerEngine) Collect() map[string]int64 {
return mx
}
-func (DockerEngine) Cleanup() {}
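+// Cleanup closes idle connections held by the Prometheus HTTP client.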
+func (de *DockerEngine) Cleanup() {
+ if de.prom != nil && de.prom.HTTPClient() != nil {
+ de.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/docker_engine/docker_engine_test.go b/modules/docker_engine/docker_engine_test.go
index 7ffc1ce5e..d70853563 100644
--- a/modules/docker_engine/docker_engine_test.go
+++ b/modules/docker_engine/docker_engine_test.go
@@ -8,30 +8,39 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- metricsNonDockerEngine, _ = os.ReadFile("testdata/non-docker-engine.txt")
- metricsV17050CE, _ = os.ReadFile("testdata/v17.05.0-ce.txt")
- metricsV18093CE, _ = os.ReadFile("testdata/v18.09.3-ce.txt")
- metricsV18093CESwarm, _ = os.ReadFile("testdata/v18.09.3-ce-swarm.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNonDockerEngineMetrics, _ = os.ReadFile("testdata/non-docker-engine.txt")
+ dataVer17050Metrics, _ = os.ReadFile("testdata/v17.05.0-ce.txt")
+ dataVer18093Metrics, _ = os.ReadFile("testdata/v18.09.3-ce.txt")
+ dataVer18093SwarmMetrics, _ = os.ReadFile("testdata/v18.09.3-ce-swarm.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, metricsNonDockerEngine)
- assert.NotNil(t, metricsV17050CE)
- assert.NotNil(t, metricsV18093CE)
- assert.NotNil(t, metricsV18093CESwarm)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNonDockerEngineMetrics": dataNonDockerEngineMetrics,
+ "dataVer17050Metrics": dataVer17050Metrics,
+ "dataVer18093Metrics": dataVer18093Metrics,
+ "dataVer18093SwarmMetrics": dataVer18093SwarmMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestDockerEngine_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DockerEngine{}, dataConfigJSON, dataConfigYAML)
}
func TestDockerEngine_Cleanup(t *testing.T) {
@@ -64,9 +73,9 @@ func TestDockerEngine_Init(t *testing.T) {
dockerEngine.Config = test.config
if test.wantFail {
- assert.False(t, dockerEngine.Init())
+ assert.Error(t, dockerEngine.Init())
} else {
- assert.True(t, dockerEngine.Init())
+ assert.NoError(t, dockerEngine.Init())
}
})
}
@@ -92,9 +101,9 @@ func TestDockerEngine_Check(t *testing.T) {
defer srv.Close()
if test.wantFail {
- assert.False(t, dockerEngine.Check())
+ assert.Error(t, dockerEngine.Check())
} else {
- assert.True(t, dockerEngine.Check())
+ assert.NoError(t, dockerEngine.Check())
}
})
}
@@ -115,7 +124,7 @@ func TestDockerEngine_Charts(t *testing.T) {
dockerEngine, srv := test.prepare(t)
defer srv.Close()
- require.True(t, dockerEngine.Check())
+ require.NoError(t, dockerEngine.Check())
assert.Len(t, *dockerEngine.Charts(), test.wantNumCharts)
})
}
@@ -271,12 +280,12 @@ func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server)
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsV17050CE)
+ _, _ = w.Write(dataVer17050Metrics)
}))
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -285,12 +294,12 @@ func prepareClientServerV18093CE(t *testing.T) (*DockerEngine, *httptest.Server)
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsV18093CE)
+ _, _ = w.Write(dataVer18093Metrics)
}))
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -299,12 +308,12 @@ func prepareClientServerV18093CESwarm(t *testing.T) (*DockerEngine, *httptest.Se
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsV18093CESwarm)
+ _, _ = w.Write(dataVer18093SwarmMetrics)
}))
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -313,12 +322,12 @@ func prepareClientServerNonDockerEngine(t *testing.T) (*DockerEngine, *httptest.
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsNonDockerEngine)
+ _, _ = w.Write(dataNonDockerEngineMetrics)
}))
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -332,7 +341,7 @@ func prepareClientServerInvalidData(t *testing.T) (*DockerEngine, *httptest.Serv
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -346,7 +355,7 @@ func prepareClientServer404(t *testing.T) (*DockerEngine, *httptest.Server) {
dockerEngine := New()
dockerEngine.URL = srv.URL
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
@@ -357,7 +366,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*DockerEngine, *httptes
dockerEngine := New()
dockerEngine.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, dockerEngine.Init())
+ require.NoError(t, dockerEngine.Init())
return dockerEngine, srv
}
diff --git a/modules/docker_engine/init.go b/modules/docker_engine/init.go
new file mode 100644
index 000000000..b3ceefdea
--- /dev/null
+++ b/modules/docker_engine/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package docker_engine
+
+import (
+	"errors"
+
+	"github.com/netdata/go.d.plugin/pkg/prometheus"
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
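+// validateConfig ensures the metrics endpoint URL is set.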
+func (de *DockerEngine) validateConfig() error {
+ if de.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
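+// initPrometheusClient builds a Prometheus text-format scraper over the configured HTTP client and request.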
+func (de *DockerEngine) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(de.Client)
+ if err != nil {
+ return nil, err
+ }
+ return prometheus.New(client, de.Request), nil
+}
diff --git a/modules/docker_engine/testdata/config.json b/modules/docker_engine/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/docker_engine/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/docker_engine/testdata/config.yaml b/modules/docker_engine/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/docker_engine/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/dockerhub/config_schema.json b/modules/dockerhub/config_schema.json
index 1be293e6f..f07966c1b 100644
--- a/modules/dockerhub/config_schema.json
+++ b/modules/dockerhub/config_schema.json
@@ -1,65 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/dockerhub job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "DockerHub collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+      "url": {
+        "title": "URL",
+        "description": "The URL of the DockerHub repositories endpoint.",
+        "type": "string",
+        "default": "https://hub.docker.com/v2/repositories"
+      },
+      "repositories": {
+        "title": "Repositories",
+        "description": "List of repositories to monitor.",
+        "type": "array",
+        "items": {
+          "title": "Repository",
+          "type": "string"
+        },
+        "uniqueItems": true
+      },
+      "timeout": {
+        "title": "Timeout",
+        "description": "The timeout in seconds for the HTTP request.",
+        "type": "number",
+        "minimum": 0.5,
+        "default": 2
+      },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+    "required": [
+      "url",
+      "repositories"
+    ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+          "fields": [
+            "update_every",
+            "url",
+            "repositories",
+            "timeout",
+            "not_follow_redirects"
+          ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "repositories": {
- "type": "array",
- "items": {
- "type": "number"
- }
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "repositories"
- ]
+ }
}
diff --git a/modules/dockerhub/dockerhub.go b/modules/dockerhub/dockerhub.go
index 48836a606..b86bd69e4 100644
--- a/modules/dockerhub/dockerhub.go
+++ b/modules/dockerhub/dockerhub.go
@@ -4,18 +4,11 @@ package dockerhub
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
-)
-
-const (
- defaultURL = "https://hub.docker.com/v2/repositories"
- defaultHTTPTimeout = time.Second * 2
-
- defaultUpdateEvery = 5
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -25,80 +18,79 @@ func init() {
module.Register("dockerhub", module.Creator{
JobConfigSchema: configSchema,
Defaults: module.Defaults{
- UpdateEvery: defaultUpdateEvery,
+ UpdateEvery: 5,
},
Create: func() module.Module { return New() },
})
}
-// New creates DockerHub with default values.
func New() *DockerHub {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &DockerHub{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "https://hub.docker.com/v2/repositories",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
},
},
}
- return &DockerHub{
- Config: config,
- }
}
-// Config is the DockerHub module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
- Repositories []string
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Repositories []string `yaml:"repositories" json:"repositories"`
}
-// DockerHub DockerHub module.
type DockerHub struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
client *apiClient
}
-// Cleanup makes cleanup.
-func (DockerHub) Cleanup() {}
-
-// Init makes initialization.
-func (dh *DockerHub) Init() bool {
- if dh.URL == "" {
- dh.Error("URL not set")
- return false
- }
+func (dh *DockerHub) Configuration() any {
+ return dh.Config
+}
- if len(dh.Repositories) == 0 {
- dh.Error("repositories parameter is not set")
- return false
+func (dh *DockerHub) Init() error {
+ if err := dh.validateConfig(); err != nil {
+ dh.Errorf("config validation: %v", err)
+ return err
}
- client, err := web.NewHTTPClient(dh.Client)
+ client, err := dh.initApiClient()
if err != nil {
- dh.Errorf("error on creating http client : %v", err)
- return false
+ dh.Error(err)
+ return err
}
- dh.client = newAPIClient(client, dh.Request)
+ dh.client = client
- return true
+ return nil
}
-// Check makes check.
-func (dh DockerHub) Check() bool {
- return len(dh.Collect()) > 0
+func (dh *DockerHub) Check() error {
+ mx, err := dh.collect()
+ if err != nil {
+ dh.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts creates Charts.
-func (dh DockerHub) Charts() *Charts {
+func (dh *DockerHub) Charts() *Charts {
cs := charts.Copy()
addReposToCharts(dh.Repositories, cs)
return cs
}
-// Collect collects metrics.
func (dh *DockerHub) Collect() map[string]int64 {
mx, err := dh.collect()
@@ -109,3 +101,9 @@ func (dh *DockerHub) Collect() map[string]int64 {
return mx
}
+
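+// Cleanup closes idle connections held by the API client.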
+func (dh *DockerHub) Cleanup() {
+ if dh.client != nil && dh.client.httpClient != nil {
+ dh.client.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/modules/dockerhub/dockerhub_test.go b/modules/dockerhub/dockerhub_test.go
index 350af1a53..58188e3b7 100644
--- a/modules/dockerhub/dockerhub_test.go
+++ b/modules/dockerhub/dockerhub_test.go
@@ -9,24 +9,35 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- repo1Data, _ = os.ReadFile("testdata/repo1.txt")
- repo2Data, _ = os.ReadFile("testdata/repo2.txt")
- repo3Data, _ = os.ReadFile("testdata/repo3.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRepo1, _ = os.ReadFile("testdata/repo1.txt")
+ dataRepo2, _ = os.ReadFile("testdata/repo2.txt")
+ dataRepo3, _ = os.ReadFile("testdata/repo3.txt")
)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRepo1": dataRepo1,
+ "dataRepo2": dataRepo2,
+ "dataRepo3": dataRepo3,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.IsType(t, (*DockerHub)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
- assert.Len(t, job.Repositories, 0)
- assert.Nil(t, job.client)
+func TestDockerHub_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DockerHub{}, dataConfigJSON, dataConfigYAML)
}
func TestDockerHub_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
@@ -36,11 +47,13 @@ func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() }
func TestDockerHub_Init(t *testing.T) {
job := New()
job.Repositories = []string{"name/repo"}
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.client)
}
-func TestDockerHub_InitNG(t *testing.T) { assert.False(t, New().Init()) }
+func TestDockerHub_InitNG(t *testing.T) {
+ assert.Error(t, New().Init())
+}
func TestDockerHub_Check(t *testing.T) {
ts := httptest.NewServer(
@@ -48,11 +61,11 @@ func TestDockerHub_Check(t *testing.T) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case strings.HasSuffix(r.URL.Path, "name1/repo1"):
- _, _ = w.Write(repo1Data)
+ _, _ = w.Write(dataRepo1)
case strings.HasSuffix(r.URL.Path, "name2/repo2"):
- _, _ = w.Write(repo2Data)
+ _, _ = w.Write(dataRepo2)
case strings.HasSuffix(r.URL.Path, "name3/repo3"):
- _, _ = w.Write(repo3Data)
+ _, _ = w.Write(dataRepo3)
}
}))
defer ts.Close()
@@ -60,16 +73,16 @@ func TestDockerHub_Check(t *testing.T) {
job := New()
job.URL = ts.URL
job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestDockerHub_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/metrics"
job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestDockerHub_Collect(t *testing.T) {
@@ -78,11 +91,11 @@ func TestDockerHub_Collect(t *testing.T) {
func(w http.ResponseWriter, r *http.Request) {
switch {
case strings.HasSuffix(r.URL.Path, "name1/repo1"):
- _, _ = w.Write(repo1Data)
+ _, _ = w.Write(dataRepo1)
case strings.HasSuffix(r.URL.Path, "name2/repo2"):
- _, _ = w.Write(repo2Data)
+ _, _ = w.Write(dataRepo2)
case strings.HasSuffix(r.URL.Path, "name3/repo3"):
- _, _ = w.Write(repo3Data)
+ _, _ = w.Write(dataRepo3)
}
}))
defer ts.Close()
@@ -90,8 +103,8 @@ func TestDockerHub_Collect(t *testing.T) {
job := New()
job.URL = ts.URL
job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"star_count_user1/name1": 45,
@@ -127,8 +140,8 @@ func TestDockerHub_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestDockerHub_404(t *testing.T) {
@@ -141,6 +154,6 @@ func TestDockerHub_404(t *testing.T) {
job := New()
job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"}
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/dockerhub/init.go b/modules/dockerhub/init.go
new file mode 100644
index 000000000..17f2e712e
--- /dev/null
+++ b/modules/dockerhub/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package dockerhub
+
+import (
+	"errors"
+
+	"github.com/netdata/go.d.plugin/pkg/web"
+)
+
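+// validateConfig ensures both the endpoint URL and at least one repository are set.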
+func (dh *DockerHub) validateConfig() error {
+ if dh.URL == "" {
+ return errors.New("url not set")
+ }
+ if len(dh.Repositories) == 0 {
+ return errors.New("repositories not set")
+ }
+ return nil
+}
+
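+// initApiClient constructs the DockerHub API client from the configured HTTP client and request.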
+func (dh *DockerHub) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(dh.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(client, dh.Request), nil
+}
diff --git a/modules/dockerhub/testdata/config.json b/modules/dockerhub/testdata/config.json
new file mode 100644
index 000000000..3496e747c
--- /dev/null
+++ b/modules/dockerhub/testdata/config.json
@@ -0,0 +1,23 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "repositories": [
+ "ok"
+ ]
+}
diff --git a/modules/dockerhub/testdata/config.yaml b/modules/dockerhub/testdata/config.yaml
new file mode 100644
index 000000000..733079f4b
--- /dev/null
+++ b/modules/dockerhub/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+repositories:
+ - "ok"
\ No newline at end of file
diff --git a/modules/elasticsearch/config_schema.json b/modules/elasticsearch/config_schema.json
index f69eb6e43..2ff414ec5 100644
--- a/modules/elasticsearch/config_schema.json
+++ b/modules/elasticsearch/config_schema.json
@@ -1,74 +1,187 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/elasticsearch job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Elasticsearch collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Elasticsearch cluster.",
+ "type": "string",
+ "default": "http://127.0.0.1:9200"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "cluster_mode": {
+ "title": "Cluster mode",
+ "description": "Controls whether to collect metrics for all nodes in the Elasticsearch cluster or only for the local node where the collector is running.",
+ "type": "boolean",
+ "default": false
+ },
+ "collect_node_stats": {
+ "title": "Collect node stats",
+ "description": "Collect metrics about individual nodes in the cluster.",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_cluster_health": {
+ "title": "Collect cluster health",
+ "description": "Collect metrics about the overall health of the cluster.",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_cluster_stats": {
+ "title": "Collect cluster stats",
+ "description": "Collect high-level cluster statistics.",
+ "type": "boolean",
+ "default": true
+ },
+ "collect_indices_stats": {
+ "title": "Collect indices stats",
+ "description": "Collect metrics about individual indices in the cluster.",
+ "type": "boolean",
+ "default": false
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "cluster_mode",
+ "collect_node_stats",
+ "collect_cluster_health",
+ "collect_cluster_stats",
+ "collect_indices_stats"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "cluster_mode": {
- "type": "boolean"
- },
- "collect_node_stats": {
- "type": "boolean"
- },
- "collect_cluster_health": {
- "type": "boolean"
- },
- "collect_cluster_stats": {
- "type": "boolean"
- },
- "collect_indices_stats": {
- "type": "boolean"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/elasticsearch/elasticsearch.go b/modules/elasticsearch/elasticsearch.go
index 4b29a6cc8..dd64064f5 100644
--- a/modules/elasticsearch/elasticsearch.go
+++ b/modules/elasticsearch/elasticsearch.go
@@ -4,13 +4,13 @@ package elasticsearch
import (
_ "embed"
+ "errors"
"net/http"
"sync"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -34,7 +34,7 @@ func New() *Elasticsearch {
URL: "http://127.0.0.1:9200",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 2),
},
},
ClusterMode: false,
@@ -54,49 +54,62 @@ func New() *Elasticsearch {
}
type Config struct {
- web.HTTP `yaml:",inline"`
- ClusterMode bool `yaml:"cluster_mode"`
- DoNodeStats bool `yaml:"collect_node_stats"`
- DoClusterHealth bool `yaml:"collect_cluster_health"`
- DoClusterStats bool `yaml:"collect_cluster_stats"`
- DoIndicesStats bool `yaml:"collect_indices_stats"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ ClusterMode bool `yaml:"cluster_mode" json:"cluster_mode"`
+ DoNodeStats bool `yaml:"collect_node_stats" json:"collect_node_stats"`
+ DoClusterHealth bool `yaml:"collect_cluster_health" json:"collect_cluster_health"`
+ DoClusterStats bool `yaml:"collect_cluster_stats" json:"collect_cluster_stats"`
+ DoIndicesStats bool `yaml:"collect_indices_stats" json:"collect_indices_stats"`
}
type Elasticsearch struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+ addClusterHealthChartsOnce *sync.Once
+ addClusterStatsChartsOnce *sync.Once
httpClient *http.Client
- charts *module.Charts
clusterName string
+ nodes map[string]bool
+ indices map[string]bool
+}
- addClusterHealthChartsOnce *sync.Once
- addClusterStatsChartsOnce *sync.Once
-
- nodes map[string]bool
- indices map[string]bool
+func (es *Elasticsearch) Configuration() any {
+ return es.Config
}
-func (es *Elasticsearch) Init() bool {
+func (es *Elasticsearch) Init() error {
err := es.validateConfig()
if err != nil {
es.Errorf("check configuration: %v", err)
- return false
+ return err
}
httpClient, err := es.initHTTPClient()
if err != nil {
es.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
es.httpClient = httpClient
- return true
+ return nil
}
-func (es *Elasticsearch) Check() bool {
- return len(es.Collect()) > 0
+func (es *Elasticsearch) Check() error {
+ mx, err := es.collect()
+ if err != nil {
+ es.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (es *Elasticsearch) Charts() *module.Charts {
diff --git a/modules/elasticsearch/elasticsearch_test.go b/modules/elasticsearch/elasticsearch_test.go
index d4f1628cd..06d2f7934 100644
--- a/modules/elasticsearch/elasticsearch_test.go
+++ b/modules/elasticsearch/elasticsearch_test.go
@@ -3,6 +3,7 @@
package elasticsearch
import (
+ "github.com/netdata/go.d.plugin/agent/module"
"net/http"
"net/http/httptest"
"os"
@@ -16,27 +17,36 @@ import (
)
var (
- v842NodesLocalStats, _ = os.ReadFile("testdata/v8.4.2/nodes_local_stats.json")
- v842NodesStats, _ = os.ReadFile("testdata/v8.4.2/nodes_stats.json")
- v842ClusterHealth, _ = os.ReadFile("testdata/v8.4.2/cluster_health.json")
- v842ClusterStats, _ = os.ReadFile("testdata/v8.4.2/cluster_stats.json")
- v842CatIndicesStats, _ = os.ReadFile("testdata/v8.4.2/cat_indices_stats.json")
- v842Info, _ = os.ReadFile("testdata/v8.4.2/info.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer842NodesLocalStats, _ = os.ReadFile("testdata/v8.4.2/nodes_local_stats.json")
+ dataVer842NodesStats, _ = os.ReadFile("testdata/v8.4.2/nodes_stats.json")
+ dataVer842ClusterHealth, _ = os.ReadFile("testdata/v8.4.2/cluster_health.json")
+ dataVer842ClusterStats, _ = os.ReadFile("testdata/v8.4.2/cluster_stats.json")
+ dataVer842CatIndicesStats, _ = os.ReadFile("testdata/v8.4.2/cat_indices_stats.json")
+ dataVer842Info, _ = os.ReadFile("testdata/v8.4.2/info.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v842NodesLocalStats": v842NodesLocalStats,
- "v842NodesStats": v842NodesStats,
- "v842ClusterHealth": v842ClusterHealth,
- "v842ClusterStats": v842ClusterStats,
- "v842CatIndicesStats": v842CatIndicesStats,
- "v842Info": v842Info,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer842NodesLocalStats": dataVer842NodesLocalStats,
+ "dataVer842NodesStats": dataVer842NodesStats,
+ "dataVer842ClusterHealth": dataVer842ClusterHealth,
+ "dataVer842ClusterStats": dataVer842ClusterStats,
+ "dataVer842CatIndicesStats": dataVer842CatIndicesStats,
+ "dataVer842Info": dataVer842Info,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestElasticsearch_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Elasticsearch{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestElasticsearch_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -103,9 +113,9 @@ func TestElasticsearch_Init(t *testing.T) {
es.Config = test.config
if test.wantFail {
- assert.False(t, es.Init())
+ assert.Error(t, es.Init())
} else {
- assert.True(t, es.Init())
+ assert.NoError(t, es.Init())
}
})
}
@@ -128,9 +138,9 @@ func TestElasticsearch_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, es.Check())
+ assert.Error(t, es.Check())
} else {
- assert.True(t, es.Check())
+ assert.NoError(t, es.Check())
}
})
}
@@ -666,7 +676,7 @@ func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Ela
es = createES()
es.URL = srv.URL
- require.True(t, es.Init())
+ require.NoError(t, es.Init())
return es, srv.Close
}
@@ -683,7 +693,7 @@ func prepareElasticsearchInvalidData(t *testing.T) (*Elasticsearch, func()) {
}))
es := New()
es.URL = srv.URL
- require.True(t, es.Init())
+ require.NoError(t, es.Init())
return es, srv.Close
}
@@ -696,7 +706,7 @@ func prepareElasticsearch404(t *testing.T) (*Elasticsearch, func()) {
}))
es := New()
es.URL = srv.URL
- require.True(t, es.Init())
+ require.NoError(t, es.Init())
return es, srv.Close
}
@@ -705,7 +715,7 @@ func prepareElasticsearchConnectionRefused(t *testing.T) (*Elasticsearch, func()
t.Helper()
es := New()
es.URL = "http://127.0.0.1:38001"
- require.True(t, es.Init())
+ require.NoError(t, es.Init())
return es, func() {}
}
@@ -715,17 +725,17 @@ func prepareElasticsearchEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathNodesStats:
- _, _ = w.Write(v842NodesStats)
+ _, _ = w.Write(dataVer842NodesStats)
case urlPathLocalNodeStats:
- _, _ = w.Write(v842NodesLocalStats)
+ _, _ = w.Write(dataVer842NodesLocalStats)
case urlPathClusterHealth:
- _, _ = w.Write(v842ClusterHealth)
+ _, _ = w.Write(dataVer842ClusterHealth)
case urlPathClusterStats:
- _, _ = w.Write(v842ClusterStats)
+ _, _ = w.Write(dataVer842ClusterStats)
case urlPathIndicesStats:
- _, _ = w.Write(v842CatIndicesStats)
+ _, _ = w.Write(dataVer842CatIndicesStats)
case "/":
- _, _ = w.Write(v842Info)
+ _, _ = w.Write(dataVer842Info)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/elasticsearch/metadata.yaml b/modules/elasticsearch/metadata.yaml
index f8458e3f1..9ee892948 100644
--- a/modules/elasticsearch/metadata.yaml
+++ b/modules/elasticsearch/metadata.yaml
@@ -107,7 +107,7 @@ modules:
required: false
- name: timeout
description: HTTP request timeout.
- default_value: 5
+ default_value: 2
required: false
- name: username
description: Username for basic HTTP authentication.
diff --git a/modules/elasticsearch/testdata/config.json b/modules/elasticsearch/testdata/config.json
new file mode 100644
index 000000000..a456d1d56
--- /dev/null
+++ b/modules/elasticsearch/testdata/config.json
@@ -0,0 +1,25 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "cluster_mode": true,
+ "collect_node_stats": true,
+ "collect_cluster_health": true,
+ "collect_cluster_stats": true,
+ "collect_indices_stats": true
+}
diff --git a/modules/elasticsearch/testdata/config.yaml b/modules/elasticsearch/testdata/config.yaml
new file mode 100644
index 000000000..af1b4a136
--- /dev/null
+++ b/modules/elasticsearch/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+cluster_mode: yes
+collect_node_stats: yes
+collect_cluster_health: yes
+collect_cluster_stats: yes
+collect_indices_stats: yes
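The testdata/config.json and testdata/config.yaml fixtures above feed the ConfigurationSerialize test added at the top of the elasticsearch test file. A minimal sketch of what a round-trip helper like module.TestConfigurationSerialize plausibly checks — the helper name and semantics here are assumptions, not the repository's actual implementation:

```go
package elasticsearch

import (
	"encoding/json"
	"os"
	"testing"

	"github.com/stretchr/testify/assert"
	"gopkg.in/yaml.v3"
)

// roundTripConfig is a hypothetical stand-in for module.TestConfigurationSerialize:
// it decodes the same job configuration from the JSON and YAML fixtures and asserts
// that both succeed, which is what testdata/config.json and testdata/config.yaml
// exercise.
func roundTripConfig(t *testing.T, cfg any, pathJSON, pathYAML string) {
	t.Helper()

	bsJSON, err := os.ReadFile(pathJSON)
	assert.NoError(t, err)
	assert.NoError(t, json.Unmarshal(bsJSON, cfg))

	bsYAML, err := os.ReadFile(pathYAML)
	assert.NoError(t, err)
	assert.NoError(t, yaml.Unmarshal(bsYAML, cfg))
}
```

A module test would call it as `roundTripConfig(t, &New().Config, "testdata/config.json", "testdata/config.yaml")`, passing a pointer so both decoders can populate the struct.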
diff --git a/modules/energid/README.md b/modules/energid/README.md
deleted file mode 120000
index 894468aae..000000000
--- a/modules/energid/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/energi_core_wallet.md
\ No newline at end of file
diff --git a/modules/energid/charts.go b/modules/energid/charts.go
deleted file mode 100644
index 3dcc252af..000000000
--- a/modules/energid/charts.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import "github.com/netdata/go.d.plugin/agent/module"
-
-var charts = module.Charts{
- // getblockchaininfo (blockchain processing)
- {
- ID: "blockindex",
- Title: "Blockchain index",
- Units: "count",
- Fam: "blockchain",
- Ctx: "energid.blockindex",
- Type: module.Area,
- Dims: module.Dims{
- {ID: "blockchain_blocks", Name: "blocks"},
- {ID: "blockchain_headers", Name: "headers"},
- },
- },
- {
- ID: "difficulty",
- Title: "Blockchain difficulty",
- Units: "difficulty",
- Fam: "blockchain",
- Ctx: "energid.difficulty",
- Dims: module.Dims{
- {ID: "blockchain_difficulty", Name: "difficulty", Div: 1000},
- },
- },
-
- // getmempoolinfo (state of the TX memory pool)
- {
- ID: "mempool",
- Title: "Memory pool",
- Units: "bytes",
- Fam: "memory",
- Ctx: "energid.mempool",
- Type: module.Area,
- Dims: module.Dims{
- {ID: "mempool_max", Name: "max"},
- {ID: "mempool_current", Name: "usage"},
- {ID: "mempool_txsize", Name: "tx_size"},
- },
- },
-
- // getmemoryinfo
- {
- ID: "secmem",
- Title: "Secure memory",
- Units: "bytes",
- Fam: "memory",
- Ctx: "energid.secmem",
- Type: module.Area,
- Dims: module.Dims{
- {ID: "secmem_total", Name: "total"},
- {ID: "secmem_used", Name: "used"},
- {ID: "secmem_free", Name: "free"},
- {ID: "secmem_locked", Name: "locked"},
- },
- },
-
- // getnetworkinfo (P2P networking)
- {
- ID: "network",
- Title: "Network",
- Units: "connections",
- Fam: "network",
- Ctx: "energid.network",
- Dims: module.Dims{
- {ID: "network_connections", Name: "connections"},
- },
- },
- {
- ID: "timeoffset",
- Title: "Network time offset",
- Units: "seconds",
- Fam: "network",
- Ctx: "energid.timeoffset",
- Dims: module.Dims{
- {ID: "network_timeoffset", Name: "timeoffset"},
- },
- },
-
- // gettxoutsetinfo (unspent transaction output set)
- {
- ID: "utxo_transactions",
- Title: "Transactions",
- Units: "transactions",
- Fam: "utxo",
- Ctx: "energid.utxo_transactions",
- Dims: module.Dims{
- {ID: "utxo_transactions", Name: "transactions"},
- {ID: "utxo_output_transactions", Name: "output_transactions"},
- },
- },
-}
diff --git a/modules/energid/collect.go b/modules/energid/collect.go
deleted file mode 100644
index 965ee4b36..000000000
--- a/modules/energid/collect.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
-
- "github.com/netdata/go.d.plugin/pkg/stm"
- "github.com/netdata/go.d.plugin/pkg/web"
-)
-
-const (
- jsonRPCVersion = "1.1"
-
- methodGetBlockchainInfo = "getblockchaininfo"
- methodGetMemPoolInfo = "getmempoolinfo"
- methodGetNetworkInfo = "getnetworkinfo"
- methodGetTXOutSetInfo = "gettxoutsetinfo"
- methodGetMemoryInfo = "getmemoryinfo"
-)
-
-var infoRequests = rpcRequests{
- {JSONRPC: jsonRPCVersion, ID: 1, Method: methodGetBlockchainInfo},
- {JSONRPC: jsonRPCVersion, ID: 2, Method: methodGetMemPoolInfo},
- {JSONRPC: jsonRPCVersion, ID: 3, Method: methodGetNetworkInfo},
- {JSONRPC: jsonRPCVersion, ID: 4, Method: methodGetTXOutSetInfo},
- {JSONRPC: jsonRPCVersion, ID: 5, Method: methodGetMemoryInfo},
-}
-
-func (e *Energid) collect() (map[string]int64, error) {
- responses, err := e.scrapeEnergid(infoRequests)
- if err != nil {
- return nil, err
- }
-
- info, err := e.collectInfoResponse(infoRequests, responses)
- if err != nil {
- return nil, err
- }
-
- return stm.ToMap(info), nil
-}
-
-func (e *Energid) collectInfoResponse(requests rpcRequests, responses rpcResponses) (*energidInfo, error) {
- var info energidInfo
- for _, req := range requests {
- resp := responses.getByID(req.ID)
- if resp == nil {
- e.Warningf("method '%s' (id %d) not in responses", req.Method, req.ID)
- continue
- }
-
- if resp.Error != nil {
- e.Warningf("server returned an error on method '%s': %v", req.Method, resp.Error)
- continue
- }
-
- var err error
- switch req.Method {
- case methodGetBlockchainInfo:
- info.Blockchain, err = parseBlockchainInfo(resp.Result)
- case methodGetMemPoolInfo:
- info.MemPool, err = parseMemPoolInfo(resp.Result)
- case methodGetNetworkInfo:
- info.Network, err = parseNetworkInfo(resp.Result)
- case methodGetTXOutSetInfo:
- info.TxOutSet, err = parseTXOutSetInfo(resp.Result)
- case methodGetMemoryInfo:
- info.Memory, err = parseMemoryInfo(resp.Result)
- }
- if err != nil {
- return nil, fmt.Errorf("parse '%s' method result: %v", req.Method, err)
- }
- }
-
- return &info, nil
-}
-
-func parseBlockchainInfo(result []byte) (*blockchainInfo, error) {
- var m blockchainInfo
- if err := json.Unmarshal(result, &m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-func parseMemPoolInfo(result []byte) (*memPoolInfo, error) {
- var m memPoolInfo
- if err := json.Unmarshal(result, &m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-func parseNetworkInfo(result []byte) (*networkInfo, error) {
- var m networkInfo
- if err := json.Unmarshal(result, &m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-func parseTXOutSetInfo(result []byte) (*txOutSetInfo, error) {
- var m txOutSetInfo
- if err := json.Unmarshal(result, &m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-func parseMemoryInfo(result []byte) (*memoryInfo, error) {
- var m memoryInfo
- if err := json.Unmarshal(result, &m); err != nil {
- return nil, err
- }
- return &m, nil
-}
-
-func (e *Energid) scrapeEnergid(requests rpcRequests) (rpcResponses, error) {
- req, _ := web.NewHTTPRequest(e.Request)
- req.Method = http.MethodPost
- req.Header.Set("Content-Type", "application/json")
- body, _ := json.Marshal(requests)
- req.Body = io.NopCloser(bytes.NewReader(body))
-
- var resp rpcResponses
- if err := e.doOKDecode(req, &resp); err != nil {
- return nil, err
- }
-
- return resp, nil
-}
-
-func (e *Energid) doOKDecode(req *http.Request, in interface{}) error {
- resp, err := e.httpClient.Do(req)
- if err != nil {
- return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err)
- }
- defer closeBody(resp)
-
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode)
- }
-
- if err := json.NewDecoder(resp.Body).Decode(in); err != nil {
- return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err)
- }
-
- return nil
-}
-
-func closeBody(resp *http.Response) {
- if resp != nil && resp.Body != nil {
- _, _ = io.Copy(io.Discard, resp.Body)
- _ = resp.Body.Close()
- }
-}
diff --git a/modules/energid/config_schema.json b/modules/energid/config_schema.json
deleted file mode 100644
index 20f4ec9f8..000000000
--- a/modules/energid/config_schema.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/energid job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
- }
- },
- "required": [
- "name",
- "url"
- ]
-}
diff --git a/modules/energid/energid.go b/modules/energid/energid.go
deleted file mode 100644
index fcffe50d8..000000000
--- a/modules/energid/energid.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import (
- _ "embed"
- "net/http"
- "time"
-
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/netdata/go.d.plugin/pkg/web"
-)
-
-//go:embed "config_schema.json"
-var configSchema string
-
-func init() {
- module.Register("energid", module.Creator{
- JobConfigSchema: configSchema,
- Defaults: module.Defaults{
- UpdateEvery: 5,
- },
- Create: func() module.Module { return New() },
- })
-}
-
-type Config struct {
- web.HTTP `yaml:",inline"`
-}
-
-type Energid struct {
- module.Base
- Config `yaml:",inline"`
-
- httpClient *http.Client
- charts *module.Charts
-}
-
-func New() *Energid {
- return &Energid{
- Config: Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:9796",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
- },
- },
- },
- }
-}
-
-func (e *Energid) Init() bool {
- err := e.validateConfig()
- if err != nil {
- e.Errorf("config validation: %v", err)
- return false
- }
-
- client, err := e.initHTTPClient()
- if err != nil {
- e.Errorf("init HTTP client: %v", err)
- return false
- }
- e.httpClient = client
-
- cs, err := e.initCharts()
- if err != nil {
- e.Errorf("init charts: %v", err)
- return false
- }
- e.charts = cs
-
- return true
-}
-
-func (e *Energid) Check() bool {
- return len(e.Collect()) > 0
-}
-
-func (e *Energid) Charts() *module.Charts {
- return e.charts
-}
-
-func (e *Energid) Collect() map[string]int64 {
- ms, err := e.collect()
- if err != nil {
- e.Error(err)
- }
-
- if len(ms) == 0 {
- return nil
- }
-
- return ms
-}
-
-func (e *Energid) Cleanup() {
- if e.httpClient == nil {
- return
- }
- e.httpClient.CloseIdleConnections()
-}
diff --git a/modules/energid/energid_test.go b/modules/energid/energid_test.go
deleted file mode 100644
index ab0e2f24e..000000000
--- a/modules/energid/energid_test.go
+++ /dev/null
@@ -1,285 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import (
- "encoding/json"
- "io"
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
-
- "github.com/netdata/go.d.plugin/pkg/tlscfg"
- "github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-var (
- v241GetBlockchainInfo, _ = os.ReadFile("testdata/v2.4.1/getblockchaininfo.json")
- v241GetMemPoolInfo, _ = os.ReadFile("testdata/v2.4.1/getmempoolinfo.json")
- v241GetNetworkInfo, _ = os.ReadFile("testdata/v2.4.1/getnetworkinfo.json")
- v241GetTXOutSetInfo, _ = os.ReadFile("testdata/v2.4.1/gettxoutsetinfo.json")
- v241GetMemoryInfo, _ = os.ReadFile("testdata/v2.4.1/getmemoryinfo.json")
-)
-
-func Test_Testdata(t *testing.T) {
- for name, data := range map[string][]byte{
- "v241GetBlockchainInfo": v241GetBlockchainInfo,
- "v241GetMemPoolInfo": v241GetMemPoolInfo,
- "v241GetNetworkInfo": v241GetNetworkInfo,
- "v241GetTXOutSetInfo": v241GetTXOutSetInfo,
- "v241GetMemoryInfo": v241GetMemoryInfo,
- } {
- require.NotNilf(t, data, name)
- }
-}
-
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Energid)(nil), New())
-}
-
-func Test_Init(t *testing.T) {
- tests := map[string]struct {
- config Config
- wantFail bool
- }{
- "success on default config": {
- config: New().Config,
- },
- "fails on unset URL": {
- wantFail: true,
- config: Config{
- HTTP: web.HTTP{
- Request: web.Request{URL: ""},
- },
- },
- },
- "fails on invalid TLSCA": {
- wantFail: true,
- config: Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:38001",
- },
- Client: web.Client{
- TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"},
- },
- },
- },
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- energid := New()
- energid.Config = test.config
-
- if test.wantFail {
- assert.False(t, energid.Init())
- } else {
- assert.True(t, energid.Init())
- }
- })
- }
-}
-
-func Test_Charts(t *testing.T) {
- energid := New()
- require.True(t, energid.Init())
- assert.NotNil(t, energid.Charts())
-}
-
-func Test_Cleanup(t *testing.T) {
- assert.NotPanics(t, New().Cleanup)
-}
-
-func Test_Check(t *testing.T) {
- tests := map[string]struct {
- prepare func() (energid *Energid, cleanup func())
- wantFail bool
- }{
- "success on valid v2.4.1 response": {
- prepare: prepareEnergidV241,
- },
- "fails on 404 response": {
- wantFail: true,
- prepare: prepareEnergid404,
- },
- "fails on connection refused": {
- wantFail: true,
- prepare: prepareEnergidConnectionRefused,
- },
- "fails on response with invalid data": {
- wantFail: true,
- prepare: prepareEnergidInvalidData,
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- energid, cleanup := test.prepare()
- defer cleanup()
-
- require.True(t, energid.Init())
-
- if test.wantFail {
- assert.False(t, energid.Check())
- } else {
- assert.True(t, energid.Check())
- }
- })
- }
-}
-
-func Test_Collect(t *testing.T) {
- tests := map[string]struct {
- prepare func() (energid *Energid, cleanup func())
- wantCollected map[string]int64
- }{
- "success on valid v2.4.1 response": {
- prepare: prepareEnergidV241,
- wantCollected: map[string]int64{
- "blockchain_blocks": 1,
- "blockchain_difficulty": 0,
- "blockchain_headers": 1,
- "mempool_current": 1,
- "mempool_max": 300000000,
- "mempool_txsize": 1,
- "network_connections": 1,
- "network_timeoffset": 1,
- "secmem_free": 65248,
- "secmem_locked": 65536,
- "secmem_total": 65536,
- "secmem_used": 288,
- "utxo_output_transactions": 1,
- "utxo_transactions": 1,
- },
- },
- "fails on 404 response": {
- prepare: prepareEnergid404,
- },
- "fails on connection refused": {
- prepare: prepareEnergidConnectionRefused,
- },
- "fails on response with invalid data": {
- prepare: prepareEnergidInvalidData,
- },
- }
-
- for name, test := range tests {
- t.Run(name, func(t *testing.T) {
- energid, cleanup := test.prepare()
- defer cleanup()
- require.True(t, energid.Init())
-
- collected := energid.Collect()
-
- assert.Equal(t, test.wantCollected, collected)
- if len(test.wantCollected) > 0 {
- ensureCollectedHasAllChartsDimsVarsIDs(t, energid, collected)
- }
- })
- }
-}
-
-func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, energid *Energid, ms map[string]int64) {
- for _, chart := range *energid.Charts() {
- if chart.Obsolete {
- continue
- }
- for _, dim := range chart.Dims {
- _, ok := ms[dim.ID]
- assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID)
- }
- for _, v := range chart.Vars {
- _, ok := ms[v.ID]
- assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID)
- }
- }
-}
-
-func prepareEnergidV241() (*Energid, func()) {
- srv := prepareEnergidEndPoint()
- energid := New()
- energid.URL = srv.URL
-
- return energid, srv.Close
-}
-
-func prepareEnergidInvalidData() (*Energid, func()) {
- srv := httptest.NewServer(http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write([]byte("Hello world!"))
- }))
- energid := New()
- energid.URL = srv.URL
-
- return energid, srv.Close
-}
-
-func prepareEnergid404() (*Energid, func()) {
- srv := httptest.NewServer(http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(http.StatusNotFound)
- }))
- energid := New()
- energid.URL = srv.URL
-
- return energid, srv.Close
-}
-
-func prepareEnergidConnectionRefused() (*Energid, func()) {
- energid := New()
- energid.URL = "http://127.0.0.1:38001"
-
- return energid, func() {}
-}
-
-func prepareEnergidEndPoint() *httptest.Server {
- return httptest.NewServer(http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- if r.Method != http.MethodPost {
- w.WriteHeader(http.StatusMethodNotAllowed)
- return
- }
-
- body, _ := io.ReadAll(r.Body)
- var requests rpcRequests
- if err := json.Unmarshal(body, &requests); err != nil || len(requests) == 0 {
- w.WriteHeader(http.StatusInternalServerError)
- return
- }
-
- var responses rpcResponses
- for _, req := range requests {
- resp := rpcResponse{JSONRPC: jsonRPCVersion, ID: req.ID}
- switch req.Method {
- case methodGetBlockchainInfo:
- resp.Result = prepareResult(v241GetBlockchainInfo)
- case methodGetMemPoolInfo:
- resp.Result = prepareResult(v241GetMemPoolInfo)
- case methodGetNetworkInfo:
- resp.Result = prepareResult(v241GetNetworkInfo)
- case methodGetTXOutSetInfo:
- resp.Result = prepareResult(v241GetTXOutSetInfo)
- case methodGetMemoryInfo:
- resp.Result = prepareResult(v241GetMemoryInfo)
- default:
- resp.Error = &rpcError{Code: -32601, Message: "Method not found"}
- }
- responses = append(responses, resp)
- }
-
- bs, _ := json.Marshal(responses)
- _, _ = w.Write(bs)
- }))
-}
-
-func prepareResult(resp []byte) json.RawMessage {
- var r rpcResponse
- _ = json.Unmarshal(resp, &r)
- return r.Result
-}
diff --git a/modules/energid/init.go b/modules/energid/init.go
deleted file mode 100644
index 3b7b7fb9e..000000000
--- a/modules/energid/init.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import (
- "errors"
- "net/http"
-
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/netdata/go.d.plugin/pkg/web"
-)
-
-func (e Energid) validateConfig() error {
- if e.URL == "" {
- return errors.New("URL not set")
- }
-
- if _, err := web.NewHTTPRequest(e.Request); err != nil {
- return err
- }
-
- return nil
-}
-
-func (e Energid) initHTTPClient() (*http.Client, error) {
- return web.NewHTTPClient(e.Client)
-}
-
-func (e Energid) initCharts() (*module.Charts, error) {
- return charts.Copy(), nil
-}
diff --git a/modules/energid/integrations/energi_core_wallet.md b/modules/energid/integrations/energi_core_wallet.md
deleted file mode 100644
index 405123277..000000000
--- a/modules/energid/integrations/energi_core_wallet.md
+++ /dev/null
@@ -1,224 +0,0 @@
-
-
-# Energi Core Wallet
-
-
-
-
-
-Plugin: go.d.plugin
-Module: apache
-
-
-
-## Overview
-
-This module monitors Energi Core Wallet instances.
-Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Energi Core Wallet instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| energid.blockindex | blocks, headers | count |
-| energid.difficulty | difficulty | difficulty |
-| energid.mempool | max, usage, tx_size | bytes |
-| energid.secmem | total, used, free, locked | bytes |
-| energid.network | connections | connections |
-| energid.timeoffset | timeoffset | seconds |
-| energid.utxo_transactions | transactions, output_transactions | transactions |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-No action required.
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `go.d/energid.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config go.d/energid.conf
-```
-#### Options
-
-The following options can be defined globally: update_every, autodetection_retry.
-
-
-Config options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Data collection frequency. | 1 | no |
-| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
-| url | Server URL. | http://127.0.0.1:9796 | yes |
-| timeout | HTTP request timeout. | 1 | no |
-| username | Username for basic HTTP authentication. | | no |
-| password | Password for basic HTTP authentication. | | no |
-| proxy_url | Proxy URL. | | no |
-| proxy_username | Username for proxy basic HTTP authentication. | | no |
-| proxy_password | Password for proxy basic HTTP authentication. | | no |
-| method | HTTP request method. | GET | no |
-| body | HTTP request body. | | no |
-| headers | HTTP request headers. | | no |
-| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
-| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
-| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
-| tls_cert | Client TLS certificate. | | no |
-| tls_key | Client TLS key. | | no |
-
-
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:9796
-
-```
-##### HTTP authentication
-
-Basic HTTP authentication.
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:9796
- username: username
- password: password
-
-```
-
-
-##### HTTPS with self-signed certificate
-
-Do not validate server certificate chain and hostname.
-
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: https://127.0.0.1:9796
- tls_skip_verify: yes
-
-```
-
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:9796
-
- - name: remote
- url: http://192.0.2.1:9796
-
-```
-
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `go.d.plugin` to debug the collector:
-
- ```bash
- ./go.d.plugin -d -m apache
- ```
-
-
diff --git a/modules/energid/jsonrpc.go b/modules/energid/jsonrpc.go
deleted file mode 100644
index c3a80e9b0..000000000
--- a/modules/energid/jsonrpc.go
+++ /dev/null
@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-import (
- "encoding/json"
- "fmt"
-)
-
-// https://www.jsonrpc.org/specification#request_object
-type (
- rpcRequest struct {
- JSONRPC string `json:"jsonrpc"`
- Method string `json:"method"`
- ID int `json:"id"`
- }
- rpcRequests []rpcRequest
-)
-
-// http://www.jsonrpc.org/specification#response_object
-type (
- rpcResponse struct {
- JSONRPC string `json:"jsonrpc"`
- Result json.RawMessage `json:"result"`
- Error *rpcError `json:"error"`
- ID int `json:"id"`
- }
- rpcResponses []rpcResponse
-)
-
-func (rs rpcResponses) getByID(id int) *rpcResponse {
- for _, r := range rs {
- if r.ID == id {
- return &r
- }
- }
- return nil
-}
-
-// http://www.jsonrpc.org/specification#error_object
-type rpcError struct {
- Code int64 `json:"code"`
- Message string `json:"message"`
-}
-
-func (e rpcError) String() string {
- return fmt.Sprintf("%s (code %d)", e.Message, e.Code)
-}
diff --git a/modules/energid/metadata.yaml b/modules/energid/metadata.yaml
deleted file mode 100644
index c32f7cb57..000000000
--- a/modules/energid/metadata.yaml
+++ /dev/null
@@ -1,225 +0,0 @@
-plugin_name: go.d.plugin
-modules:
- - meta:
- id: collector-go.d.plugin-energid
- module_name: apache
- plugin_name: energid
- monitored_instance:
- name: Energi Core Wallet
- link: ""
- icon_filename: energi.png
- categories:
- - data-collection.blockchain-servers
- keywords:
- - energid
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- most_popular: true
- overview:
- data_collection:
- metrics_description: |
- This module monitors Energi Core Wallet instances.
- Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet).
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: go.d/energid.conf
- options:
- description: |
- The following options can be defined globally: update_every, autodetection_retry.
- folding:
- title: Config options
- enabled: true
- list:
- - name: update_every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: autodetection_retry
- description: Recheck interval in seconds. Zero means no recheck will be scheduled.
- default_value: 0
- required: false
- - name: url
- description: Server URL.
- default_value: http://127.0.0.1:9796
- required: true
- - name: timeout
- description: HTTP request timeout.
- default_value: 1
- required: false
- - name: username
- description: Username for basic HTTP authentication.
- default_value: ""
- required: false
- - name: password
- description: Password for basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_url
- description: Proxy URL.
- default_value: ""
- required: false
- - name: proxy_username
- description: Username for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_password
- description: Password for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: method
- description: HTTP request method.
- default_value: GET
- required: false
- - name: body
- description: HTTP request body.
- default_value: ""
- required: false
- - name: headers
- description: HTTP request headers.
- default_value: ""
- required: false
- - name: not_follow_redirects
- description: Redirect handling policy. Controls whether the client follows redirects.
- default_value: no
- required: false
- - name: tls_skip_verify
- description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
- default_value: no
- required: false
- - name: tls_ca
- description: Certification authority that the client uses when verifying the server's certificates.
- default_value: ""
- required: false
- - name: tls_cert
- description: Client TLS certificate.
- default_value: ""
- required: false
- - name: tls_key
- description: Client TLS key.
- default_value: ""
- required: false
- examples:
- folding:
- title: Config
- enabled: true
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:9796
- - name: HTTP authentication
- description: Basic HTTP authentication.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:9796
- username: username
- password: password
- - name: HTTPS with self-signed certificate
- description: |
- Do not validate server certificate chain and hostname.
- config: |
- jobs:
- - name: local
- url: https://127.0.0.1:9796
- tls_skip_verify: yes
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:9796
-
- - name: remote
- url: http://192.0.2.1:9796
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: energid.blockindex
- description: Blockchain index
- unit: count
- chart_type: area
- dimensions:
- - name: blocks
- - name: headers
- - name: energid.difficulty
- description: Blockchain difficulty
- unit: difficulty
- chart_type: line
- dimensions:
- - name: difficulty
- - name: energid.mempool
- description: Memory pool
- unit: bytes
- chart_type: area
- dimensions:
- - name: max
- - name: usage
- - name: tx_size
- - name: energid.secmem
- description: Secure memory
- unit: bytes
- chart_type: area
- dimensions:
- - name: total
- - name: used
- - name: free
- - name: locked
- - name: energid.network
- description: Network
- unit: connections
- chart_type: line
- dimensions:
- - name: connections
- - name: energid.timeoffset
- description: Network time offset
- unit: seconds
- chart_type: line
- dimensions:
- - name: timeoffset
- - name: energid.utxo_transactions
- description: Transactions
- unit: transactions
- chart_type: line
- dimensions:
- - name: transactions
- - name: output_transactions
diff --git a/modules/energid/metrics.go b/modules/energid/metrics.go
deleted file mode 100644
index 2e77edf91..000000000
--- a/modules/energid/metrics.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package energid
-
-// API docs: https://github.com/energicryptocurrency/core-api-documentation
-
-type energidInfo struct {
- Blockchain *blockchainInfo `stm:"blockchain"`
- MemPool *memPoolInfo `stm:"mempool"`
- Network *networkInfo `stm:"network"`
- TxOutSet *txOutSetInfo `stm:"utxo"`
- Memory *memoryInfo `stm:"secmem"`
-}
-
-// https://github.com/energicryptocurrency/core-api-documentation#getblockchaininfo
-type blockchainInfo struct {
- Blocks float64 `stm:"blocks" json:"blocks"`
- Headers float64 `stm:"headers" json:"headers"`
- Difficulty float64 `stm:"difficulty,1000,1" json:"difficulty"`
-}
-
-// https://github.com/energicryptocurrency/core-api-documentation#getmempoolinfo
-type memPoolInfo struct {
- Bytes float64 `stm:"txsize" json:"bytes"`
- Usage float64 `stm:"current" json:"usage"`
- MaxMemPool float64 `stm:"max" json:"maxmempool"`
-}
-
-// https://github.com/energicryptocurrency/core-api-documentation#getnetworkinfo
-type networkInfo struct {
- TimeOffset float64 `stm:"timeoffset" json:"timeoffset"`
- Connections float64 `stm:"connections" json:"connections"`
-}
-
-// https://github.com/energicryptocurrency/core-api-documentation#gettxoutsetinfo
-type txOutSetInfo struct {
- Transactions float64 `stm:"transactions" json:"transactions"`
- TxOuts float64 `stm:"output_transactions" json:"txouts"`
-}
-
-// undocumented
-type memoryInfo struct {
- Locked struct {
- Used float64 `stm:"used" json:"used"`
- Free float64 `stm:"free" json:"free"`
- Total float64 `stm:"total" json:"total"`
- Locked float64 `stm:"locked" json:"locked"`
- } `stm:"" json:"locked"`
-}
diff --git a/modules/energid/testdata/v2.4.1/getblockchaininfo.json b/modules/energid/testdata/v2.4.1/getblockchaininfo.json
deleted file mode 100644
index 7d194d62a..000000000
--- a/modules/energid/testdata/v2.4.1/getblockchaininfo.json
+++ /dev/null
@@ -1,66 +0,0 @@
-{
- "result": {
- "chain": "test",
- "blocks": 1,
- "headers": 1,
- "bestblockhash": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74",
- "difficulty": 4.656542373906925e-10,
- "mediantime": 1524344801,
- "verificationprogress": 3.57591520058473e-07,
- "chainwork": "0000000000000000000000000000000000000000000000000000000000000002",
- "pruned": false,
- "pos": false,
- "posv2": false,
- "softforks": [
- {
- "id": "bip34",
- "version": 2,
- "reject": {
- "status": false
- }
- },
- {
- "id": "bip66",
- "version": 3,
- "reject": {
- "status": false
- }
- },
- {
- "id": "bip65",
- "version": 4,
- "reject": {
- "status": false
- }
- }
- ],
- "bip9_softforks": {
- "csv": {
- "status": "defined",
- "startTime": 1486252800,
- "timeout": 1549328400,
- "since": 1
- },
- "dip0001": {
- "status": "defined",
- "startTime": 1505692800,
- "timeout": 1549328400,
- "since": 1
- },
- "bip147": {
- "status": "defined",
- "startTime": 1546300800,
- "timeout": 1549328400,
- "since": 1
- },
- "spork17": {
- "status": "defined",
- "startTime": 1566129600,
- "timeout": 1577793600,
- "since": 1
- }
- }
- },
- "error": null,
- "id": "1"
-}
diff --git a/modules/energid/testdata/v2.4.1/getmemoryinfo.json b/modules/energid/testdata/v2.4.1/getmemoryinfo.json
deleted file mode 100644
index 9fdece550..000000000
--- a/modules/energid/testdata/v2.4.1/getmemoryinfo.json
+++ /dev/null
@@ -1,14 +0,0 @@
-{
- "result": {
- "locked": {
- "used": 288,
- "free": 65248,
- "total": 65536,
- "locked": 65536,
- "chunks_used": 4,
- "chunks_free": 2
- }
- },
- "error": null,
- "id": "1"
-}
diff --git a/modules/energid/testdata/v2.4.1/getmempoolinfo.json b/modules/energid/testdata/v2.4.1/getmempoolinfo.json
deleted file mode 100644
index 8845555b1..000000000
--- a/modules/energid/testdata/v2.4.1/getmempoolinfo.json
+++ /dev/null
@@ -1,11 +0,0 @@
-{
- "result": {
- "size": 1,
- "bytes": 1,
- "usage": 1,
- "maxmempool": 300000000,
- "mempoolminfee": 1
- },
- "error": null,
- "id": "1"
-}
diff --git a/modules/energid/testdata/v2.4.1/getnetworkinfo.json b/modules/energid/testdata/v2.4.1/getnetworkinfo.json
deleted file mode 100644
index 59df2c5ad..000000000
--- a/modules/energid/testdata/v2.4.1/getnetworkinfo.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "result": {
- "version": 2040100,
- "subversion": "/Energi Core:2.4.1/",
- "protocolversion": 70213,
- "localservices": "0000000000000005",
- "localrelay": true,
- "timeoffset": 1,
- "networkactive": true,
- "connections": 1,
- "networks": [
- {
- "name": "ipv4",
- "limited": false,
- "reachable": true,
- "proxy": "",
- "proxy_randomize_credentials": false
- },
- {
- "name": "ipv6",
- "limited": false,
- "reachable": true,
- "proxy": "",
- "proxy_randomize_credentials": false
- },
- {
- "name": "onion",
- "limited": true,
- "reachable": false,
- "proxy": "",
- "proxy_randomize_credentials": false
- }
- ],
- "relayfee": 1e-05,
- "incrementalfee": 1e-05,
- "localaddresses": [],
- "warnings": ""
- },
- "error": null,
- "id": "1"
-}
diff --git a/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json b/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json
deleted file mode 100644
index 5bc606f57..000000000
--- a/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json
+++ /dev/null
@@ -1,13 +0,0 @@
-{
- "result": {
- "height": 1,
- "bestblock": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74",
- "transactions": 1,
- "txouts": 1,
- "hash_serialized_2": "ba3631e5919f37c8f542658238de0516612a7063fbd6143ef813a4e1cc4548e1",
- "disk_size": 1,
- "total_amount": 1
- },
- "error": null,
- "id": "1"
-}
diff --git a/modules/envoy/config_schema.json b/modules/envoy/config_schema.json
index 48b3c9478..bdcaacc5a 100644
--- a/modules/envoy/config_schema.json
+++ b/modules/envoy/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/envoy job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Envoy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Envoy Prometheus endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:9091/stats/prometheus"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
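The rewritten config_schema.json files bundle the validation schema together with UI rendering hints in a single document. A hypothetical reader for the new envelope, assuming only the two top-level keys visible above:

```go
package schema

import "encoding/json"

// configSchema models the new two-part envelope: a standard JSON Schema under
// "jsonSchema" plus rendering hints (tabs, widgets, fullPage) under "uiSchema".
type configSchema struct {
	JSONSchema json.RawMessage `json:"jsonSchema"`
	UISchema   json.RawMessage `json:"uiSchema"`
}

// splitSchema separates the embedded document so a validator can consume the
// schema while a frontend consumes the UI hints. Hypothetical helper, not
// taken from the repository.
func splitSchema(raw []byte) (configSchema, error) {
	var s configSchema
	err := json.Unmarshal(raw, &s)
	return s, err
}
```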
diff --git a/modules/envoy/envoy.go b/modules/envoy/envoy.go
index de9efa13d..e8fa07c33 100644
--- a/modules/envoy/envoy.go
+++ b/modules/envoy/envoy.go
@@ -4,6 +4,7 @@ package envoy
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -29,7 +30,7 @@ func New() *Envoy {
URL: "http://127.0.0.1:9091/stats/prometheus",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -46,17 +47,18 @@ func New() *Envoy {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Envoy struct {
module.Base
- Config `yaml:",inline"`
-
- prom prometheus.Prometheus
+ Config `yaml:",inline" json:""`
charts *module.Charts
+ prom prometheus.Prometheus
+
servers map[string]bool
clusterMgrs map[string]bool
clusterUpstream map[string]bool
@@ -65,24 +67,37 @@ type Envoy struct {
listenerDownstream map[string]bool
}
-func (e *Envoy) Init() bool {
+func (e *Envoy) Configuration() any {
+ return e.Config
+}
+
+func (e *Envoy) Init() error {
if err := e.validateConfig(); err != nil {
e.Errorf("config validation: %v", err)
- return false
+ return err
}
prom, err := e.initPrometheusClient()
if err != nil {
e.Errorf("init Prometheus client: %v", err)
- return false
+ return err
}
e.prom = prom
- return true
+ return nil
}
-func (e *Envoy) Check() bool {
- return len(e.Collect()) > 0
+func (e *Envoy) Check() error {
+ mx, err := e.collect()
+ if err != nil {
+ e.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (e *Envoy) Charts() *module.Charts {
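The Init/Check hunks above repeat across every module in this patch: boolean status is replaced by errors, and a Configuration() accessor is added for the serialization tests. A sketch of the assumed target contract — the canonical definition lives in agent/module and may differ in detail:

```go
package module

// Module sketches the migrated contract implied by these hunks; it is an
// assumption drawn from the call sites, not the canonical agent/module code.
type Module interface {
	Init() error               // was Init() bool: failures now return the cause
	Check() error              // was Check() bool
	Charts() *Charts           // unchanged
	Collect() map[string]int64 // unchanged
	Cleanup()                  // unchanged
	Configuration() any        // new: exposes the job config for serialization
}
```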
diff --git a/modules/envoy/envoy_test.go b/modules/envoy/envoy_test.go
index 3bdd82cb1..50bb51bba 100644
--- a/modules/envoy/envoy_test.go
+++ b/modules/envoy/envoy_test.go
@@ -3,6 +3,7 @@
package envoy
import (
+ "github.com/netdata/go.d.plugin/agent/module"
"net/http"
"net/http/httptest"
"os"
@@ -15,19 +16,28 @@ import (
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataEnvoyConsulDataplane, _ = os.ReadFile("testdata/consul-dataplane.txt")
dataEnvoy, _ = os.ReadFile("testdata/envoy.txt")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
"dataEnvoyConsulDataplane": dataEnvoyConsulDataplane,
"dataEnvoy": dataEnvoy,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestEnvoy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Envoy{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestEnvoy_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -53,9 +63,9 @@ func TestEnvoy_Init(t *testing.T) {
envoy.Config = test.config
if test.wantFail {
- assert.False(t, envoy.Init())
+ assert.Error(t, envoy.Init())
} else {
- assert.True(t, envoy.Init())
+ assert.NoError(t, envoy.Init())
}
})
}
@@ -66,7 +76,7 @@ func TestEnvoy_Cleanup(t *testing.T) {
envoy := New()
assert.NotPanics(t, envoy.Cleanup)
- require.True(t, envoy.Init())
+ require.NoError(t, envoy.Init())
assert.NotPanics(t, envoy.Cleanup)
}
@@ -76,7 +86,7 @@ func TestEnvoy_Charts(t *testing.T) {
require.Empty(t, *envoy.Charts())
- require.True(t, envoy.Init())
+ require.NoError(t, envoy.Init())
_ = envoy.Collect()
require.NotEmpty(t, *envoy.Charts())
}
@@ -109,12 +119,12 @@ func TestEnvoy_Check(t *testing.T) {
envoy, cleanup := test.prepare()
defer cleanup()
- require.True(t, envoy.Init())
+ require.NoError(t, envoy.Init())
if test.wantFail {
- assert.False(t, envoy.Check())
+ assert.Error(t, envoy.Check())
} else {
- assert.True(t, envoy.Check())
+ assert.NoError(t, envoy.Check())
}
})
}
@@ -489,7 +499,7 @@ func TestEnvoy_Collect(t *testing.T) {
envoy, cleanup := test.prepare()
defer cleanup()
- require.True(t, envoy.Init())
+ require.NoError(t, envoy.Init())
mx := envoy.Collect()
diff --git a/modules/envoy/testdata/config.json b/modules/envoy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/envoy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/envoy/testdata/config.yaml b/modules/envoy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/envoy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/example/config_schema.json b/modules/example/config_schema.json
index 852b39b1c..95dba7e06 100644
--- a/modules/example/config_schema.json
+++ b/modules/example/config_schema.json
@@ -1,68 +1,151 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/example job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "charts": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string"
- },
- "num": {
- "type": "integer"
- },
- "contexts": {
- "type": "integer"
- },
- "dimensions": {
- "type": "integer"
- },
- "labels": {
- "type": "integer"
- }
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Example collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
},
- "required": [
- "type",
- "num",
- "contexts",
- "dimensions",
- "labels"
- ]
- },
- "hidden_charts": {
- "type": "object",
- "properties": {
- "type": {
- "type": "string"
+ "charts": {
+ "title": "Charts configuration",
+ "type": "object",
+ "properties": {
+ "type": {
+ "title": "Chart type",
+ "description": "The type of all charts.",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "num": {
+ "title": "Number of charts",
+ "description": "The total number of charts to create.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 1
+ },
+ "contexts": {
+ "title": "Number of contexts",
+ "description": "The total number of unique contexts.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "dimensions": {
+ "title": "Number of dimensions",
+ "description": "The number of dimensions each chart will have.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 4
+ },
+ "labels": {
+ "title": "Number of labels",
+ "description": "The number of labels each chart will have.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ }
},
- "num": {
- "type": "integer"
+ "required": [
+ "type",
+ "num",
+ "contexts",
+ "dimensions",
+ "labels"
+ ]
+ },
+ "hidden_charts": {
+ "title": "Hidden charts configuration",
+ "type": "object",
+ "properties": {
+ "type": {
+ "title": "Chart type",
+ "description": "The type of all charts.",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "num": {
+ "title": "Number of charts",
+ "description": "The total number of charts to create.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "contexts": {
+ "title": "Number of contexts",
+ "description": "The total number of unique contexts.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ },
+ "dimensions": {
+ "title": "Number of dimensions",
+ "description": "The number of dimensions each chart will have.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 4
+ },
+ "labels": {
+ "title": "Number of labels",
+ "description": "The number of labels each chart will have.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 0
+ }
},
- "contexts": {
- "type": "integer"
+ "required": [
+ "type",
+ "num",
+ "contexts",
+ "dimensions",
+ "labels"
+ ]
+ }
+ },
+ "required": [
+ "charts"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every"
+ ]
},
- "dimensions": {
- "type": "integer"
+ {
+ "title": "Charts",
+ "fields": [
+ "charts"
+ ]
},
- "labels": {
- "type": "integer"
+ {
+ "title": "Hidden charts",
+ "fields": [
+ "hidden_charts"
+ ]
}
- },
- "required": [
- "type",
- "num",
- "contexts",
- "dimensions",
- "labels"
]
}
- },
- "required": [
- "name",
- "charts"
- ]
+ }
}
diff --git a/modules/example/example.go b/modules/example/example.go
index fe24bcc3e..ca39c4a74 100644
--- a/modules/example/example.go
+++ b/modules/example/example.go
@@ -16,10 +16,9 @@ func init() {
module.Register("example", module.Creator{
JobConfigSchema: configSchema,
Defaults: module.Defaults{
- UpdateEvery: module.UpdateEvery,
- AutoDetectionRetry: module.AutoDetectionRetry,
- Priority: module.Priority,
- Disabled: true,
+ UpdateEvery: module.UpdateEvery,
+ Priority: module.Priority,
+			// Disabled: true,
},
Create: func() module.Module { return New() },
})
@@ -45,15 +44,15 @@ func New() *Example {
type (
Config struct {
- Charts ConfigCharts `yaml:"charts"`
- HiddenCharts ConfigCharts `yaml:"hidden_charts"`
+ Charts ConfigCharts `yaml:"charts" json:"charts"`
+ HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"`
}
ConfigCharts struct {
- Type string `yaml:"type"`
- Num int `yaml:"num"`
- Contexts int `yaml:"contexts"`
- Dims int `yaml:"dimensions"`
- Labels int `yaml:"labels"`
+ Type string `yaml:"type" json:"type"`
+ Num int `yaml:"num" json:"num"`
+		Contexts int    `yaml:"contexts" json:"contexts"`
+ Dims int `yaml:"dimensions" json:"dimensions"`
+ Labels int `yaml:"labels" json:"labels"`
}
)
@@ -66,24 +65,28 @@ type Example struct {
collectedDims map[string]bool
}
-func (e *Example) Init() bool {
+func (e *Example) Configuration() any {
+ return e.Config
+}
+
+func (e *Example) Init() error {
err := e.validateConfig()
if err != nil {
e.Errorf("config validation: %v", err)
- return false
+ return err
}
charts, err := e.initCharts()
if err != nil {
e.Errorf("charts init: %v", err)
- return false
+ return err
}
e.charts = charts
- return true
+ return nil
}
-func (e *Example) Check() bool {
- return len(e.Collect()) > 0
+func (e *Example) Check() error {
+ return nil
}
func (e *Example) Charts() *module.Charts {
diff --git a/modules/example/example_test.go b/modules/example/example_test.go
index 47cc51a2f..36181727d 100644
--- a/modules/example/example_test.go
+++ b/modules/example/example_test.go
@@ -96,9 +96,9 @@ func TestExample_Init(t *testing.T) {
example.Config = test.config
if test.wantFail {
- assert.False(t, example.Init())
+ assert.Error(t, example.Init())
} else {
- assert.True(t, example.Init())
+ assert.NoError(t, example.Init())
}
})
}
@@ -124,12 +124,12 @@ func TestExample_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
example := test.prepare()
- require.True(t, example.Init())
+ require.NoError(t, example.Init())
if test.wantFail {
- assert.False(t, example.Check())
+ assert.Error(t, example.Check())
} else {
- assert.True(t, example.Check())
+ assert.NoError(t, example.Check())
}
})
}
@@ -153,7 +153,7 @@ func TestExample_Charts(t *testing.T) {
"initialized collector": {
prepare: func(t *testing.T) *Example {
example := New()
- require.True(t, example.Init())
+ require.NoError(t, example.Init())
return example
},
},
@@ -259,7 +259,7 @@ func TestExample_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
example := test.prepare()
- require.True(t, example.Init())
+ require.NoError(t, example.Init())
collected := example.Collect()
diff --git a/modules/filecheck/collect_dirs.go b/modules/filecheck/collect_dirs.go
index 32861c0e0..622cbf76a 100644
--- a/modules/filecheck/collect_dirs.go
+++ b/modules/filecheck/collect_dirs.go
@@ -14,7 +14,7 @@ import (
func (fc *Filecheck) collectDirs(ms map[string]int64) {
curTime := time.Now()
- if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration {
+ if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration() {
fc.lastDiscoveryDirs = curTime
fc.curDirs = fc.discoveryDirs()
fc.updateDirsCharts(fc.curDirs)
@@ -54,7 +54,7 @@ func (fc *Filecheck) collectDir(ms map[string]int64, path string, curTime time.T
}
}
-func (fc Filecheck) discoveryDirs() (dirs []string) {
+func (fc *Filecheck) discoveryDirs() (dirs []string) {
for _, path := range fc.Dirs.Include {
if hasMeta(path) {
continue
diff --git a/modules/filecheck/collect_files.go b/modules/filecheck/collect_files.go
index 25568473f..a3dd93ef8 100644
--- a/modules/filecheck/collect_files.go
+++ b/modules/filecheck/collect_files.go
@@ -14,7 +14,7 @@ import (
func (fc *Filecheck) collectFiles(ms map[string]int64) {
curTime := time.Now()
- if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration {
+ if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration() {
fc.lastDiscoveryFiles = curTime
fc.curFiles = fc.discoveryFiles()
fc.updateFilesCharts(fc.curFiles)
@@ -47,7 +47,7 @@ func (fc *Filecheck) collectFile(ms map[string]int64, path string, curTime time.
ms[fileDimID(path, "mtime_ago")] = int64(curTime.Sub(info.ModTime()).Seconds())
}
-func (fc Filecheck) discoveryFiles() (files []string) {
+func (fc *Filecheck) discoveryFiles() (files []string) {
for _, path := range fc.Files.Include {
if hasMeta(path) {
continue
diff --git a/modules/filecheck/config_schema.json b/modules/filecheck/config_schema.json
index a6b0efca9..0a77f817e 100644
--- a/modules/filecheck/config_schema.json
+++ b/modules/filecheck/config_schema.json
@@ -1,75 +1,107 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/filecheck job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "discovery_every": {
- "type": [
- "string",
- "integer"
- ]
- },
- "files": {
- "type": "object",
- "properties": {
- "include": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "exclude": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Filecheck collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
},
- "required": [
- "include",
- "exclude"
- ]
- },
- "dirs": {
- "type": "object",
- "properties": {
- "include": {
- "type": "array",
- "items": {
- "type": "string"
+ "files": {
+ "title": "File selector",
+        "description": "Files matching the selector will be monitored. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 OR exclude2). Patterns follow the syntax of shell file name patterns.",
+ "type": "object",
+ "properties": {
+ "include": {
+ "title": "Include",
+ "description": "Include files that match any of the specified include patterns.",
+ "type": "array",
+ "items": {
+ "title": "Filepath",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "exclude": {
+ "title": "Exclude",
+ "description": "Exclude files that match any of the specified exclude patterns.",
+ "type": "array",
+ "items": {
+ "title": "Filepath",
+ "type": "string"
+ },
+ "uniqueItems": true
}
},
- "exclude": {
- "type": "array",
- "items": {
- "type": "string"
+ "required": [
+ "include"
+ ]
+ },
+ "dirs": {
+ "title": "Directory selector",
+        "description": "Directories matching the selector will be monitored. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 OR exclude2). Patterns follow the syntax of shell file name patterns.",
+ "type": "object",
+ "properties": {
+ "include": {
+ "title": "Include",
+ "description": "Include directories that match any of the specified include patterns.",
+ "type": "array",
+ "items": {
+ "title": "Directory",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "exclude": {
+ "title": "Exclude",
+ "description": "Exclude directories that match any of the specified exclude patterns.",
+ "type": "array",
+ "items": {
+ "title": "Directory",
+ "type": "string"
+ },
+ "uniqueItems": true
}
},
- "collect_dir_size": {
- "type": "boolean"
- }
+ "required": [
+ "include"
+ ]
},
- "required": [
- "include",
- "exclude"
- ]
+ "collect_dir_size": {
+ "title": "Collect directory size?",
+ "description": "Enable the collection of directory sizes for each monitored directory. Enabling this option may introduce additional overhead on both Netdata and the host system, particularly if directories contain a large number of subdirectories and files.",
+ "type": "boolean",
+ "default": false
+ }
}
},
- "oneOf": [
- {
- "required": [
- "name",
- "files"
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Files",
+ "fields": [
+ "update_every",
+ "files"
+ ]
+ },
+ {
+ "title": "Directories",
+ "fields": [
+ "update_every",
+ "collect_dir_size",
+ "dirs"
+ ]
+ }
]
},
- {
- "required": [
- "name",
- "dirs"
- ]
+ "uiOptions": {
+ "fullPage": true
}
- ]
+ }
}
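
A note on the selector semantics described in the schema above: the expression (include1 OR include2) AND !(exclude1 OR exclude2) means a path is monitored when it matches at least one include pattern and no exclude pattern. A minimal sketch of that logic using Go's shell-style filepath.Match — the collector uses its own matching internally, so the helper names here are illustrative only:

package main

import (
	"fmt"
	"path/filepath"
)

// matchAny reports whether path matches any of the shell file name patterns.
func matchAny(patterns []string, path string) bool {
	for _, p := range patterns {
		if ok, _ := filepath.Match(p, path); ok {
			return true
		}
	}
	return false
}

// selected implements (include1 OR include2) AND !(exclude1 OR exclude2).
func selected(include, exclude []string, path string) bool {
	return matchAny(include, path) && !matchAny(exclude, path)
}

func main() {
	include := []string{"/var/log/*.log"}
	exclude := []string{"/var/log/debug.log"}
	fmt.Println(selected(include, exclude, "/var/log/app.log"))   // true
	fmt.Println(selected(include, exclude, "/var/log/debug.log")) // false
}
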
diff --git a/modules/filecheck/filecheck.go b/modules/filecheck/filecheck.go
index e1369bc1c..f528929fc 100644
--- a/modules/filecheck/filecheck.go
+++ b/modules/filecheck/filecheck.go
@@ -26,7 +26,7 @@ func init() {
func New() *Filecheck {
return &Filecheck{
Config: Config{
- DiscoveryEvery: web.Duration{Duration: time.Second * 30},
+ DiscoveryEvery: web.Duration(time.Second * 30),
Files: filesConfig{},
Dirs: dirsConfig{
CollectDirSize: true,
@@ -39,24 +39,27 @@ func New() *Filecheck {
type (
Config struct {
- DiscoveryEvery web.Duration `yaml:"discovery_every"`
- Files filesConfig `yaml:"files"`
- Dirs dirsConfig `yaml:"dirs"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DiscoveryEvery web.Duration `yaml:"discovery_every" json:"discovery_every"`
+ Files filesConfig `yaml:"files" json:"files"`
+ Dirs dirsConfig `yaml:"dirs" json:"dirs"`
}
filesConfig struct {
- Include []string `yaml:"include"`
- Exclude []string `yaml:"exclude"`
+ Include []string `yaml:"include" json:"include"`
+ Exclude []string `yaml:"exclude" json:"exclude"`
}
dirsConfig struct {
- Include []string `yaml:"include"`
- Exclude []string `yaml:"exclude"`
- CollectDirSize bool `yaml:"collect_dir_size"`
+ Include []string `yaml:"include" json:"include"`
+ Exclude []string `yaml:"exclude" json:"exclude"`
+ CollectDirSize bool `yaml:"collect_dir_size" json:"collect_dir_size"`
}
)
type Filecheck struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
lastDiscoveryFiles time.Time
curFiles []string
@@ -65,34 +68,34 @@ type Filecheck struct {
lastDiscoveryDirs time.Time
curDirs []string
collectedDirs map[string]bool
-
- charts *module.Charts
}
-func (Filecheck) Cleanup() {
+func (fc *Filecheck) Configuration() any {
+ return fc.Config
}
-func (fc *Filecheck) Init() bool {
+func (fc *Filecheck) Init() error {
err := fc.validateConfig()
if err != nil {
fc.Errorf("error on validating config: %v", err)
- return false
+ return err
}
charts, err := fc.initCharts()
if err != nil {
fc.Errorf("error on charts initialization: %v", err)
- return false
+ return err
}
fc.charts = charts
fc.Debugf("monitored files: %v", fc.Files.Include)
fc.Debugf("monitored dirs: %v", fc.Dirs.Include)
- return true
+
+ return nil
}
-func (fc Filecheck) Check() bool {
- return true
+func (fc *Filecheck) Check() error {
+ return nil
}
func (fc *Filecheck) Charts() *module.Charts {
@@ -110,3 +113,6 @@ func (fc *Filecheck) Collect() map[string]int64 {
}
return ms
}
+
+func (fc *Filecheck) Cleanup() {
+}
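
The hunk above captures the central theme of this patch: module lifecycle methods switch from bool to error returns, and each module gains a Configuration() accessor so its config can be round-tripped through JSON and YAML. A sketch of the contract as inferred from this diff — the authoritative definition lives in agent/module, so the names below are assumptions:

package module

// Sketch only: inferred from the changes in this patch, not the real file.
type Charts struct{} // placeholder for module.Charts

type Module interface {
	Init() error               // was Init() bool
	Check() error              // was Check() bool
	Charts() *Charts           // unchanged
	Collect() map[string]int64 // unchanged
	Cleanup()                  // unchanged
	Configuration() any        // new: returns the job's Config value
}
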
diff --git a/modules/filecheck/filecheck_test.go b/modules/filecheck/filecheck_test.go
index 5024f6460..d2e70d0e9 100644
--- a/modules/filecheck/filecheck_test.go
+++ b/modules/filecheck/filecheck_test.go
@@ -3,6 +3,7 @@
package filecheck
import (
+ "os"
"strings"
"testing"
@@ -12,8 +13,22 @@ import (
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFilecheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Filecheck{}, dataConfigJSON, dataConfigYAML)
}
func TestFilecheck_Cleanup(t *testing.T) {
@@ -86,9 +101,9 @@ func TestFilecheck_Init(t *testing.T) {
fc.Config = test.config
if test.wantFail {
- assert.False(t, fc.Init())
+ assert.Error(t, fc.Init())
} else {
- require.True(t, fc.Init())
+ require.NoError(t, fc.Init())
assert.Equal(t, test.wantNumOfCharts, len(*fc.Charts()))
}
})
@@ -111,9 +126,9 @@ func TestFilecheck_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
fc := test.prepare()
- require.True(t, fc.Init())
+ require.NoError(t, fc.Init())
- assert.True(t, fc.Check())
+ assert.NoError(t, fc.Check())
})
}
}
@@ -226,7 +241,7 @@ func TestFilecheck_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
fc := test.prepare()
- require.True(t, fc.Init())
+ require.NoError(t, fc.Init())
collected := fc.Collect()
diff --git a/modules/filecheck/init.go b/modules/filecheck/init.go
index 858e3e503..b2e27459a 100644
--- a/modules/filecheck/init.go
+++ b/modules/filecheck/init.go
@@ -8,14 +8,14 @@ import (
"github.com/netdata/go.d.plugin/agent/module"
)
-func (fc Filecheck) validateConfig() error {
+func (fc *Filecheck) validateConfig() error {
if len(fc.Files.Include) == 0 && len(fc.Dirs.Include) == 0 {
return errors.New("both 'files->include' and 'dirs->include' are empty")
}
return nil
}
-func (fc Filecheck) initCharts() (*module.Charts, error) {
+func (fc *Filecheck) initCharts() (*module.Charts, error) {
charts := &module.Charts{}
if len(fc.Files.Include) > 0 {
diff --git a/modules/filecheck/metadata.yaml b/modules/filecheck/metadata.yaml
index d4e78cea1..57a121ec1 100644
--- a/modules/filecheck/metadata.yaml
+++ b/modules/filecheck/metadata.yaml
@@ -60,7 +60,7 @@ modules:
default_value: 0
required: false
- name: files
- description: List of files to monitor.
+ description: Files matching the selector will be monitored.
default_value: ""
required: true
detailed_description: |
diff --git a/modules/filecheck/testdata/config.json b/modules/filecheck/testdata/config.json
new file mode 100644
index 000000000..93d286f84
--- /dev/null
+++ b/modules/filecheck/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "discovery_every": 123.123,
+ "files": {
+ "include": [
+ "ok"
+ ],
+ "exclude": [
+ "ok"
+ ]
+ },
+ "dirs": {
+ "include": [
+ "ok"
+ ],
+ "exclude": [
+ "ok"
+ ],
+ "collect_dir_size": true
+ }
+}
diff --git a/modules/filecheck/testdata/config.yaml b/modules/filecheck/testdata/config.yaml
new file mode 100644
index 000000000..494a21855
--- /dev/null
+++ b/modules/filecheck/testdata/config.yaml
@@ -0,0 +1,13 @@
+update_every: 123
+discovery_every: 123.123
+files:
+ include:
+ - "ok"
+ exclude:
+ - "ok"
+dirs:
+ include:
+ - "ok"
+ exclude:
+ - "ok"
+ collect_dir_size: yes
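
The testdata above feeds duration fields plain numbers (123.123), which suggests that web.Duration — now a conversion type rather than a struct wrapping time.Duration — unmarshals bare numbers as seconds. A hedged sketch of that assumption; the Duration() accessor is confirmed by this patch, the unmarshaling behavior is not:

package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/web"
	"gopkg.in/yaml.v2"
)

func main() {
	// Assumption from the testdata: a bare number means seconds,
	// so "123.123" should decode to 123.123s (2m3.123s).
	var d web.Duration
	if err := yaml.Unmarshal([]byte("123.123"), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration()) // expected: 2m3.123s
}
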
diff --git a/modules/fluentd/collect.go b/modules/fluentd/collect.go
new file mode 100644
index 000000000..14ee6df68
--- /dev/null
+++ b/modules/fluentd/collect.go
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import "fmt"
+
+func (f *Fluentd) collect() (map[string]int64, error) {
+ info, err := f.apiClient.getPluginsInfo()
+ if err != nil {
+ return nil, err
+ }
+
+ mx := make(map[string]int64)
+
+ for _, p := range info.Payload {
+ // TODO: if p.Category == "input" ?
+ if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() {
+ continue
+ }
+
+ if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) {
+ f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category)
+ continue
+ }
+
+ id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
+
+ if p.hasCategory() {
+ mx[id+"_retry_count"] = *p.RetryCount
+ }
+ if p.hasBufferQueueLength() {
+ mx[id+"_buffer_queue_length"] = *p.BufferQueueLength
+ }
+ if p.hasBufferTotalQueuedSize() {
+ mx[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize
+ }
+
+ if !f.activePlugins[id] {
+ f.activePlugins[id] = true
+ f.addPluginToCharts(p)
+ }
+
+ }
+
+ return mx, nil
+}
+
+func (f *Fluentd) addPluginToCharts(p pluginData) {
+ id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
+
+ if p.hasCategory() {
+ chart := f.charts.Get("retry_count")
+ _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+ if p.hasBufferQueueLength() {
+ chart := f.charts.Get("buffer_queue_length")
+ _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+ if p.hasBufferTotalQueuedSize() {
+ chart := f.charts.Get("buffer_total_queued_size")
+ _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID})
+ chart.MarkNotCreated()
+ }
+}
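
For reference, the dimension IDs built in collect() above concatenate plugin ID, type, and category before the metric suffix, which is where test keys like output_stdout_stdout_output_retry_count come from:

package main

import "fmt"

func main() {
	// Mirrors collect(): id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
	id := fmt.Sprintf("%s_%s_%s", "output_stdout", "stdout", "output")
	fmt.Println(id + "_retry_count") // output_stdout_stdout_output_retry_count
}
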
diff --git a/modules/fluentd/config_schema.json b/modules/fluentd/config_schema.json
index f5bfe3047..0c9b16416 100644
--- a/modules/fluentd/config_schema.json
+++ b/modules/fluentd/config_schema.json
@@ -1,62 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/fluentd job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Fluentd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Fluentd built-in webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1:24220"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "permit_plugin_id": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/fluentd/fluentd.go b/modules/fluentd/fluentd.go
index 5b627b7b4..347ca66dd 100644
--- a/modules/fluentd/fluentd.go
+++ b/modules/fluentd/fluentd.go
@@ -4,13 +4,12 @@ package fluentd
import (
_ "embed"
- "fmt"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -23,145 +22,100 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1:24220"
- defaultHTTPTimeout = time.Second * 2
-)
-
-// New creates Fluentd with default values.
func New() *Fluentd {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
- },
- }}
-
return &Fluentd{
- Config: config,
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:24220",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
+ }},
activePlugins: make(map[string]bool),
charts: charts.Copy(),
}
}
type Config struct {
- web.HTTP `yaml:",inline"`
- PermitPlugin string `yaml:"permit_plugin_id"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ PermitPlugin string `yaml:"permit_plugin_id" json:"permit_plugin_id"`
}
-// Fluentd Fluentd module.
type Fluentd struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *Charts
+
+ apiClient *apiClient
permitPlugin matcher.Matcher
- apiClient *apiClient
activePlugins map[string]bool
- charts *Charts
}
-// Cleanup makes cleanup.
-func (Fluentd) Cleanup() {}
+func (f *Fluentd) Configuration() any {
+ return f.Config
+}
-// Init makes initialization.
-func (f *Fluentd) Init() bool {
- if f.URL == "" {
- f.Error("URL not set")
- return false
+func (f *Fluentd) Init() error {
+ if err := f.validateConfig(); err != nil {
+ f.Error(err)
+ return err
}
- if f.PermitPlugin != "" {
- m, err := matcher.NewSimplePatternsMatcher(f.PermitPlugin)
- if err != nil {
- f.Errorf("error on creating permit_plugin matcher : %v", err)
- return false
- }
- f.permitPlugin = matcher.WithCache(m)
+ pm, err := f.initPermitPluginMatcher()
+ if err != nil {
+ f.Error(err)
+ return err
}
+ f.permitPlugin = pm
- client, err := web.NewHTTPClient(f.Client)
+ client, err := f.initApiClient()
if err != nil {
- f.Errorf("error on creating client : %v", err)
- return false
+ f.Error(err)
+ return err
}
-
- f.apiClient = newAPIClient(client, f.Request)
+ f.apiClient = client
f.Debugf("using URL %s", f.URL)
- f.Debugf("using timeout: %s", f.Timeout.Duration)
+ f.Debugf("using timeout: %s", f.Timeout.Duration())
- return true
+ return nil
}
-// Check makes check.
-func (f Fluentd) Check() bool { return len(f.Collect()) > 0 }
-
-// Charts creates Charts.
-func (f Fluentd) Charts() *Charts { return f.charts }
-
-// Collect collects metrics.
-func (f *Fluentd) Collect() map[string]int64 {
- info, err := f.apiClient.getPluginsInfo()
-
+func (f *Fluentd) Check() error {
+ mx, err := f.collect()
if err != nil {
f.Error(err)
- return nil
+ return err
}
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
- metrics := make(map[string]int64)
-
- for _, p := range info.Payload {
- // TODO: if p.Category == "input" ?
- if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() {
- continue
- }
-
- if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) {
- f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category)
- continue
- }
-
- id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
+ }
+ return nil
+}
- if p.hasCategory() {
- metrics[id+"_retry_count"] = *p.RetryCount
- }
- if p.hasBufferQueueLength() {
- metrics[id+"_buffer_queue_length"] = *p.BufferQueueLength
- }
- if p.hasBufferTotalQueuedSize() {
- metrics[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize
- }
+func (f *Fluentd) Charts() *Charts {
+ return f.charts
+}
- if !f.activePlugins[id] {
- f.activePlugins[id] = true
- f.addPluginToCharts(p)
- }
+func (f *Fluentd) Collect() map[string]int64 {
+ mx, err := f.collect()
+ if err != nil {
+ f.Error(err)
+ return nil
}
- return metrics
+ return mx
}
-func (f *Fluentd) addPluginToCharts(p pluginData) {
- id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category)
-
- if p.hasCategory() {
- chart := f.charts.Get("retry_count")
- _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID})
- chart.MarkNotCreated()
- }
- if p.hasBufferQueueLength() {
- chart := f.charts.Get("buffer_queue_length")
- _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID})
- chart.MarkNotCreated()
- }
- if p.hasBufferTotalQueuedSize() {
- chart := f.charts.Get("buffer_total_queued_size")
- _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID})
- chart.MarkNotCreated()
+func (f *Fluentd) Cleanup() {
+ if f.apiClient != nil && f.apiClient.httpClient != nil {
+ f.apiClient.httpClient.CloseIdleConnections()
}
}
diff --git a/modules/fluentd/fluentd_test.go b/modules/fluentd/fluentd_test.go
index 492e2ebaa..095c0a939 100644
--- a/modules/fluentd/fluentd_test.go
+++ b/modules/fluentd/fluentd_test.go
@@ -8,51 +8,63 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-var testDataPlugins, _ = os.ReadFile("testdata/plugins.json")
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-func TestNew(t *testing.T) {
- job := New()
- assert.IsType(t, (*Fluentd)(nil), job)
- assert.NotNil(t, job.charts)
- assert.NotNil(t, job.activePlugins)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+ dataPluginsMetrics, _ = os.ReadFile("testdata/plugins.json")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataPluginsMetrics": dataPluginsMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFluentd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Fluentd{}, dataConfigJSON, dataConfigYAML)
}
func TestFluentd_Init(t *testing.T) {
// OK
job := New()
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.apiClient)
//NG
job = New()
job.URL = ""
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestFluentd_Check(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testDataPlugins)
+ _, _ = w.Write(dataPluginsMetrics)
}))
defer ts.Close()
// OK
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
// NG
job = New()
job.URL = "http://127.0.0.1:38001/api/plugins.json"
- require.True(t, job.Init())
- require.False(t, job.Check())
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
}
func TestFluentd_Charts(t *testing.T) {
@@ -66,15 +78,15 @@ func TestFluentd_Cleanup(t *testing.T) {
func TestFluentd_Collect(t *testing.T) {
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testDataPlugins)
+ _, _ = w.Write(dataPluginsMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"output_stdout_stdout_output_retry_count": 0,
@@ -97,8 +109,8 @@ func TestFluentd_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestFluentd_404(t *testing.T) {
@@ -110,6 +122,6 @@ func TestFluentd_404(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/fluentd/init.go b/modules/fluentd/init.go
new file mode 100644
index 000000000..89914d793
--- /dev/null
+++ b/modules/fluentd/init.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package fluentd
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (f *Fluentd) validateConfig() error {
+ if f.URL == "" {
+ return errors.New("url not set")
+ }
+
+ return nil
+}
+
+func (f *Fluentd) initPermitPluginMatcher() (matcher.Matcher, error) {
+ if f.PermitPlugin == "" {
+ return matcher.TRUE(), nil
+ }
+
+ return matcher.NewSimplePatternsMatcher(f.PermitPlugin)
+}
+
+func (f *Fluentd) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(f.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return newAPIClient(client, f.Request), nil
+}
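
When permit_plugin_id is unset, initPermitPluginMatcher falls back to matcher.TRUE(), which admits every plugin ID. A brief usage sketch of the simple-patterns matcher seen above — NewSimplePatternsMatcher and MatchString appear in this patch, but the exact pattern syntax is an assumption (see pkg/matcher):

package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/matcher"
)

func main() {
	// Netdata "simple patterns": presumably space-separated globs.
	m, err := matcher.NewSimplePatternsMatcher("output_*")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("output_stdout")) // expected: true
	fmt.Println(m.MatchString("input_tail"))    // expected: false
}
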
diff --git a/modules/fluentd/metadata.yaml b/modules/fluentd/metadata.yaml
index 99e85da1a..0a6a66058 100644
--- a/modules/fluentd/metadata.yaml
+++ b/modules/fluentd/metadata.yaml
@@ -63,7 +63,7 @@ modules:
required: true
- name: timeout
description: HTTP request timeout.
- default_value: 2
+ default_value: 1
required: false
- name: username
description: Username for basic HTTP authentication.
diff --git a/modules/fluentd/testdata/config.json b/modules/fluentd/testdata/config.json
new file mode 100644
index 000000000..6477bd57d
--- /dev/null
+++ b/modules/fluentd/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "permit_plugin_id": "ok"
+}
diff --git a/modules/fluentd/testdata/config.yaml b/modules/fluentd/testdata/config.yaml
new file mode 100644
index 000000000..0afd42e67
--- /dev/null
+++ b/modules/fluentd/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+permit_plugin_id: "ok"
diff --git a/modules/freeradius/config_schema.json b/modules/freeradius/config_schema.json
index b8bd25fa9..660270d24 100644
--- a/modules/freeradius/config_schema.json
+++ b/modules/freeradius/config_schema.json
@@ -1,31 +1,56 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/freeradius job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "FreeRADIUS collector configuration schema.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "Server address.",
+ "type": "string",
+ "default": "127.0.0.1"
+ },
+ "port": {
+ "title": "Port",
+ "description": "Server port.",
+ "type": "integer",
+ "default": 18121
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "TCP connect/read/write timeout in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "secret": {
+ "title": "Secret",
+ "description": "Shared secret key.",
+ "type": "string"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address",
+ "port",
+ "secret"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "port": {
- "type": "integer"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"secret": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "address",
- "port",
- "secret"
- ]
+ }
}
diff --git a/modules/freeradius/freeradius.go b/modules/freeradius/freeradius.go
index 5897917cf..6be6f6107 100644
--- a/modules/freeradius/freeradius.go
+++ b/modules/freeradius/freeradius.go
@@ -7,10 +7,9 @@ import (
"errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/freeradius/api"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,72 +23,70 @@ func init() {
}
func New() *FreeRADIUS {
- cfg := Config{
- Address: "127.0.0.1",
- Port: 18121,
- Secret: "adminsecret",
- Timeout: web.Duration{Duration: time.Second},
- }
return &FreeRADIUS{
- Config: cfg,
+ Config: Config{
+ Address: "127.0.0.1",
+ Port: 18121,
+ Secret: "adminsecret",
+ Timeout: web.Duration(time.Second),
+ },
}
}
+type Config struct {
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Port int `yaml:"port" json:"port"`
+ Secret string `yaml:"secret" json:"secret"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+}
+
type (
- client interface {
- Status() (*api.Status, error)
- }
- Config struct {
- Address string
- Port int
- Secret string
- Timeout web.Duration
- }
FreeRADIUS struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
client
}
+ client interface {
+ Status() (*api.Status, error)
+ }
)
-func (f FreeRADIUS) validateConfig() error {
- if f.Address == "" {
- return errors.New("address not set")
- }
- if f.Port == 0 {
- return errors.New("port not set")
- }
- if f.Secret == "" {
- return errors.New("secret not set")
- }
- return nil
+func (f *FreeRADIUS) Configuration() any {
+ return f.Config
}
-func (f *FreeRADIUS) initClient() {
+func (f *FreeRADIUS) Init() error {
+ if err := f.validateConfig(); err != nil {
+ f.Errorf("config validation: %v", err)
+ return err
+ }
+
f.client = api.New(api.Config{
Address: f.Address,
Port: f.Port,
Secret: f.Secret,
- Timeout: f.Timeout.Duration,
+ Timeout: f.Timeout.Duration(),
})
+
+ return nil
}
-func (f *FreeRADIUS) Init() bool {
- err := f.validateConfig()
+func (f *FreeRADIUS) Check() error {
+ mx, err := f.collect()
if err != nil {
- f.Errorf("error on validating config: %v", err)
- return false
+ f.Error(err)
+ return err
}
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
- f.initClient()
- return true
-}
-
-func (f FreeRADIUS) Check() bool {
- return len(f.Collect()) > 0
+ }
+ return nil
}
-func (FreeRADIUS) Charts() *Charts {
+func (f *FreeRADIUS) Charts() *Charts {
return charts.Copy()
}
@@ -105,4 +102,4 @@ func (f *FreeRADIUS) Collect() map[string]int64 {
return mx
}
-func (FreeRADIUS) Cleanup() {}
+func (f *FreeRADIUS) Cleanup() {}
diff --git a/modules/freeradius/freeradius_test.go b/modules/freeradius/freeradius_test.go
index b9432ec96..0455269ff 100644
--- a/modules/freeradius/freeradius_test.go
+++ b/modules/freeradius/freeradius_test.go
@@ -4,57 +4,73 @@ package freeradius
import (
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/freeradius/api"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestFreeRADIUS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &FreeRADIUS{}, dataConfigJSON, dataConfigYAML)
}
func TestFreeRADIUS_Init(t *testing.T) {
freeRADIUS := New()
- assert.True(t, freeRADIUS.Init())
+ assert.NoError(t, freeRADIUS.Init())
}
func TestFreeRADIUS_Init_ReturnsFalseIfAddressNotSet(t *testing.T) {
freeRADIUS := New()
freeRADIUS.Address = ""
- assert.False(t, freeRADIUS.Init())
+ assert.Error(t, freeRADIUS.Init())
}
func TestFreeRADIUS_Init_ReturnsFalseIfPortNotSet(t *testing.T) {
freeRADIUS := New()
freeRADIUS.Port = 0
- assert.False(t, freeRADIUS.Init())
+ assert.Error(t, freeRADIUS.Init())
}
func TestFreeRADIUS_Init_ReturnsFalseIfSecretNotSet(t *testing.T) {
freeRADIUS := New()
freeRADIUS.Secret = ""
- assert.False(t, freeRADIUS.Init())
+ assert.Error(t, freeRADIUS.Init())
}
func TestFreeRADIUS_Check(t *testing.T) {
freeRADIUS := New()
freeRADIUS.client = newOKMockClient()
- assert.True(t, freeRADIUS.Check())
+ assert.NoError(t, freeRADIUS.Check())
}
func TestFreeRADIUS_Check_ReturnsFalseIfClientStatusReturnsError(t *testing.T) {
freeRADIUS := New()
freeRADIUS.client = newErrorMockClient()
- assert.False(t, freeRADIUS.Check())
+ assert.Error(t, freeRADIUS.Check())
}
func TestFreeRADIUS_Charts(t *testing.T) {
diff --git a/modules/freeradius/init.go b/modules/freeradius/init.go
new file mode 100644
index 000000000..9c14da0ea
--- /dev/null
+++ b/modules/freeradius/init.go
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package freeradius
+
+import (
+ "errors"
+)
+
+func (f *FreeRADIUS) validateConfig() error {
+ if f.Address == "" {
+ return errors.New("address not set")
+ }
+ if f.Port == 0 {
+ return errors.New("port not set")
+ }
+ if f.Secret == "" {
+ return errors.New("secret not set")
+ }
+ return nil
+}
diff --git a/modules/freeradius/testdata/config.json b/modules/freeradius/testdata/config.json
new file mode 100644
index 000000000..5a1939b60
--- /dev/null
+++ b/modules/freeradius/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "port": 123,
+ "secret": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/freeradius/testdata/config.yaml b/modules/freeradius/testdata/config.yaml
new file mode 100644
index 000000000..939bb6654
--- /dev/null
+++ b/modules/freeradius/testdata/config.yaml
@@ -0,0 +1,6 @@
+update_every: 123
+address: "ok"
+port: 123
+secret: "ok"
+timeout: 123.123
+
diff --git a/modules/geth/config_schema.json b/modules/geth/config_schema.json
index 78d3e0abb..6f3f9ca76 100644
--- a/modules/geth/config_schema.json
+++ b/modules/geth/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/geth job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Geth collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Geth Prometheus endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:6060/debug/metrics/prometheus"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/geth/geth.go b/modules/geth/geth.go
index fe6b2bd96..f611a4d5a 100644
--- a/modules/geth/geth.go
+++ b/modules/geth/geth.go
@@ -7,10 +7,9 @@ import (
"errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,68 +23,65 @@ func init() {
}
func New() *Geth {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:6060/debug/metrics/prometheus",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &Geth{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:6060/debug/metrics/prometheus",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
-
- return &Geth{
- Config: config,
charts: charts.Copy(),
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
- Geth struct {
- module.Base
- Config `yaml:",inline"`
+type Geth struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- prom prometheus.Prometheus
- charts *Charts
- }
-)
+ charts *Charts
-func (g Geth) validateConfig() error {
- if g.URL == "" {
- return errors.New("URL is not set")
- }
- return nil
+ prom prometheus.Prometheus
+}
+
+func (g *Geth) Configuration() any {
+ return g.Config
}
-func (g *Geth) initClient() error {
- client, err := web.NewHTTPClient(g.Client)
+func (g *Geth) Init() error {
+ if err := g.validateConfig(); err != nil {
+ g.Errorf("error on validating config: %g", err)
+ return err
+ }
+
+ prom, err := g.initPrometheusClient()
if err != nil {
+ g.Error(err)
return err
}
+ g.prom = prom
- g.prom = prometheus.New(client, g.Request)
return nil
}
-func (g *Geth) Init() bool {
- if err := g.validateConfig(); err != nil {
- g.Errorf("error on validating config: %g", err)
- return false
+func (g *Geth) Check() error {
+ mx, err := g.collect()
+ if err != nil {
+ g.Error(err)
+ return err
}
- if err := g.initClient(); err != nil {
- g.Errorf("error on initializing client: %g", err)
- return false
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
- return true
-}
-
-func (g *Geth) Check() bool {
- return len(g.Collect()) > 0
+ return nil
}
func (g *Geth) Charts() *Charts {
@@ -104,4 +100,8 @@ func (g *Geth) Collect() map[string]int64 {
return mx
}
-func (Geth) Cleanup() {}
+func (g *Geth) Cleanup() {
+ if g.prom != nil && g.prom.HTTPClient() != nil {
+ g.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/geth/geth_test.go b/modules/geth/geth_test.go
new file mode 100644
index 000000000..76faf42da
--- /dev/null
+++ b/modules/geth/geth_test.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package geth
+
+import (
+ "os"
+ "testing"
+
+ "github.com/netdata/go.d.plugin/agent/module"
+
+ "github.com/stretchr/testify/require"
+)
+
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestGeth_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Geth{}, dataConfigJSON, dataConfigYAML)
+}
diff --git a/modules/geth/init.go b/modules/geth/init.go
new file mode 100644
index 000000000..bf9a81712
--- /dev/null
+++ b/modules/geth/init.go
@@ -0,0 +1,24 @@
+package geth
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (g *Geth) validateConfig() error {
+ if g.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (g *Geth) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(g.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, g.Request), nil
+}
diff --git a/modules/geth/testdata/config.json b/modules/geth/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/geth/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/geth/testdata/config.yaml b/modules/geth/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/geth/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/haproxy/config_schema.json b/modules/haproxy/config_schema.json
index 9fa8cd111..dcfd48878 100644
--- a/modules/haproxy/config_schema.json
+++ b/modules/haproxy/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/haproxy job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HAProxy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "HAProxy Prometheus endpoint URL.",
+ "type": "string",
+ "default": "http://127.0.0.1:8404/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/haproxy/haproxy.go b/modules/haproxy/haproxy.go
index ffc936711..5bd550823 100644
--- a/modules/haproxy/haproxy.go
+++ b/modules/haproxy/haproxy.go
@@ -4,6 +4,7 @@ package haproxy
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -29,7 +30,7 @@ func New() *Haproxy {
URL: "http://127.0.0.1:8404/metrics",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -41,38 +42,52 @@ func New() *Haproxy {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Haproxy struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- prom prometheus.Prometheus
+ prom prometheus.Prometheus
+
validateMetrics bool
proxies map[string]bool
}
-func (h *Haproxy) Init() bool {
+func (h *Haproxy) Configuration() any {
+ return h.Config
+}
+
+func (h *Haproxy) Init() error {
if err := h.validateConfig(); err != nil {
h.Errorf("config validation: %v", err)
- return false
+ return err
}
prom, err := h.initPrometheusClient()
if err != nil {
h.Errorf("prometheus client initialization: %v", err)
- return false
+ return err
}
h.prom = prom
- return true
+ return nil
}
-func (h *Haproxy) Check() bool {
- return len(h.Collect()) > 0
+func (h *Haproxy) Check() error {
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (h *Haproxy) Charts() *module.Charts {
@@ -80,18 +95,20 @@ func (h *Haproxy) Charts() *module.Charts {
}
func (h *Haproxy) Collect() map[string]int64 {
- ms, err := h.collect()
+ mx, err := h.collect()
if err != nil {
h.Error(err)
return nil
}
- if len(ms) == 0 {
+ if len(mx) == 0 {
return nil
}
- return ms
+ return mx
}
-func (Haproxy) Cleanup() {
- // TODO: close http idle connections
+func (h *Haproxy) Cleanup() {
+ if h.prom != nil && h.prom.HTTPClient() != nil {
+ h.prom.HTTPClient().CloseIdleConnections()
+ }
}
diff --git a/modules/haproxy/haproxy_test.go b/modules/haproxy/haproxy_test.go
index c881c19f3..88600efc6 100644
--- a/modules/haproxy/haproxy_test.go
+++ b/modules/haproxy/haproxy_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
@@ -16,19 +17,24 @@ import (
)
var (
- v2310Metrics, _ = os.ReadFile("testdata/v2.3.10/metrics.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer2310Metrics, _ = os.ReadFile("testdata/v2.3.10/metrics.txt")
)
-func Test_Testdata(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v2310Metrics": v2310Metrics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer2310Metrics": dataVer2310Metrics,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Haproxy)(nil), New())
+func TestHaproxy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Haproxy{}, dataConfigJSON, dataConfigYAML)
}
func TestHaproxy_Init(t *testing.T) {
@@ -62,9 +68,9 @@ func TestHaproxy_Init(t *testing.T) {
rdb.Config = test.config
if test.wantFail {
- assert.False(t, rdb.Init())
+ assert.Error(t, rdb.Init())
} else {
- assert.True(t, rdb.Init())
+ assert.NoError(t, rdb.Init())
}
})
}
@@ -107,9 +113,9 @@ func TestHaproxy_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, h.Check())
+ assert.Error(t, h.Check())
} else {
- assert.True(t, h.Check())
+ assert.NoError(t, h.Check())
}
})
}
@@ -181,11 +187,11 @@ func prepareCaseHaproxyV231Metrics(t *testing.T) (*Haproxy, func()) {
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(v2310Metrics)
+ _, _ = w.Write(dataVer2310Metrics)
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -213,7 +219,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"}
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -226,7 +232,7 @@ func prepareCase404Response(t *testing.T) (*Haproxy, func()) {
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -235,7 +241,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) {
t.Helper()
h := New()
h.URL = "http://127.0.0.1:38001"
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, func() {}
}
diff --git a/modules/haproxy/testdata/config.json b/modules/haproxy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/haproxy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/haproxy/testdata/config.yaml b/modules/haproxy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/haproxy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/hdfs/collect.go b/modules/hdfs/collect.go
index 9879787cd..8d613e074 100644
--- a/modules/hdfs/collect.go
+++ b/modules/hdfs/collect.go
@@ -11,68 +11,51 @@ import (
"github.com/netdata/go.d.plugin/pkg/stm"
)
-type (
- rawData map[string]json.RawMessage
- rawJMX struct {
- Beans []rawData
+func (h *HDFS) collect() (map[string]int64, error) {
+ var raw rawJMX
+ err := h.client.doOKWithDecodeJSON(&raw)
+ if err != nil {
+ return nil, err
}
-)
-
-func (r rawJMX) isEmpty() bool {
- return len(r.Beans) == 0
-}
-func (r rawJMX) find(f func(rawData) bool) rawData {
- for _, v := range r.Beans {
- if f(v) {
- return v
- }
+ if raw.isEmpty() {
+ return nil, errors.New("empty response")
}
- return nil
-}
-
-func (r rawJMX) findJvm() rawData {
- f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" }
- return r.find(f)
-}
-
-func (r rawJMX) findRPCActivity() rawData {
- f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") }
- return r.find(f)
-}
-
-func (r rawJMX) findFSNameSystem() rawData {
- f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" }
- return r.find(f)
-}
-func (r rawJMX) findFSDatasetState() rawData {
- f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" }
- return r.find(f)
-}
+ mx := h.collectRawJMX(raw)
-func (r rawJMX) findDataNodeActivity() rawData {
- f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") }
- return r.find(f)
+ return stm.ToMap(mx), nil
}
-func (h *HDFS) collect() (map[string]int64, error) {
+func (h *HDFS) determineNodeType() (nodeType, error) {
var raw rawJMX
err := h.client.doOKWithDecodeJSON(&raw)
if err != nil {
- return nil, err
+ return "", err
}
if raw.isEmpty() {
- return nil, errors.New("empty response")
+ return "", errors.New("empty response")
}
- mx := h.collectRawJMX(raw)
+ jvm := raw.findJvm()
+ if jvm == nil {
+ return "", errors.New("couldn't find jvm in response")
+ }
- return stm.ToMap(mx), nil
+ v, ok := jvm["tag.ProcessName"]
+ if !ok {
+ return "", errors.New("couldn't find process name in JvmMetrics")
+ }
+
+ t := nodeType(strings.Trim(string(v), "\""))
+ if t == nameNodeType || t == dataNodeType {
+ return t, nil
+ }
+ return "", errors.New("unknown node type")
}
-func (h HDFS) collectRawJMX(raw rawJMX) *metrics {
+func (h *HDFS) collectRawJMX(raw rawJMX) *metrics {
var mx metrics
switch h.nodeType {
default:
@@ -85,7 +68,7 @@ func (h HDFS) collectRawJMX(raw rawJMX) *metrics {
return &mx
}
-func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) {
+func (h *HDFS) collectNameNode(mx *metrics, raw rawJMX) {
err := h.collectJVM(mx, raw)
if err != nil {
h.Debugf("error on collecting jvm : %v", err)
@@ -102,7 +85,7 @@ func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) {
}
}
-func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) {
+func (h *HDFS) collectDataNode(mx *metrics, raw rawJMX) {
err := h.collectJVM(mx, raw)
if err != nil {
h.Debugf("error on collecting jvm : %v", err)
@@ -124,7 +107,7 @@ func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) {
}
}
-func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error {
+func (h *HDFS) collectJVM(mx *metrics, raw rawJMX) error {
v := raw.findJvm()
if v == nil {
return nil
@@ -140,7 +123,7 @@ func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error {
return nil
}
-func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error {
+func (h *HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error {
v := raw.findRPCActivity()
if v == nil {
return nil
@@ -156,7 +139,7 @@ func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error {
return nil
}
-func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error {
+func (h *HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error {
v := raw.findFSNameSystem()
if v == nil {
return nil
@@ -174,7 +157,7 @@ func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error {
return nil
}
-func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error {
+func (h *HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error {
v := raw.findFSDatasetState()
if v == nil {
return nil
@@ -193,7 +176,7 @@ func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error {
return nil
}
-func (h HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error {
+func (h *HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error {
v := raw.findDataNodeActivity()
if v == nil {
return nil
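
The new determineNodeType reads tag.ProcessName out of the JvmMetrics bean. Because beans are decoded as json.RawMessage, string values still carry their surrounding quotes, hence the strings.Trim before comparing against nameNodeType/dataNodeType. A tiny runnable illustration of that detail:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Values kept as json.RawMessage retain their JSON quoting.
	var bean map[string]json.RawMessage
	_ = json.Unmarshal([]byte(`{"tag.ProcessName":"NameNode"}`), &bean)
	v := bean["tag.ProcessName"]               // raw bytes: "NameNode" (with quotes)
	fmt.Println(strings.Trim(string(v), `"`)) // NameNode
}
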
diff --git a/modules/hdfs/config_schema.json b/modules/hdfs/config_schema.json
index 483c49301..679e3d05b 100644
--- a/modules/hdfs/config_schema.json
+++ b/modules/hdfs/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/hdfs job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HDFS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the HDFS server built-in webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1:50070/jmx"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/hdfs/hdfs.go b/modules/hdfs/hdfs.go
index aa0b2efe2..58aca2966 100644
--- a/modules/hdfs/hdfs.go
+++ b/modules/hdfs/hdfs.go
@@ -5,12 +5,10 @@ package hdfs
import (
_ "embed"
"errors"
- "strings"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -23,7 +21,6 @@ func init() {
})
}
-// New creates HDFS with default values.
func New() *HDFS {
config := Config{
HTTP: web.HTTP{
@@ -31,7 +28,8 @@ func New() *HDFS {
URL: "http://127.0.0.1:50070/jmx",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second}},
+ Timeout: web.Duration(time.Second),
+ },
},
}
@@ -40,93 +38,68 @@ func New() *HDFS {
}
}
-type nodeType string
-
-const (
- dataNodeType nodeType = "DataNode"
- nameNodeType nodeType = "NameNode"
-)
-
-// Config is the HDFS module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
-// HDFS HDFS module.
-type HDFS struct {
- module.Base
- Config `yaml:",inline"`
+type (
+ HDFS struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- nodeType
- client *client
-}
-
-// Cleanup makes cleanup.
-func (HDFS) Cleanup() {}
-
-func (h HDFS) createClient() (*client, error) {
- httpClient, err := web.NewHTTPClient(h.Client)
- if err != nil {
- return nil, err
- }
-
- return newClient(httpClient, h.Request), nil
-}
-
-func (h HDFS) determineNodeType() (nodeType, error) {
- var raw rawJMX
- err := h.client.doOKWithDecodeJSON(&raw)
- if err != nil {
- return "", err
- }
+ client *client
- if raw.isEmpty() {
- return "", errors.New("empty response")
+ nodeType
}
+ nodeType string
+)
- jvm := raw.findJvm()
- if jvm == nil {
- return "", errors.New("couldn't find jvm in response")
- }
+const (
+ dataNodeType nodeType = "DataNode"
+ nameNodeType nodeType = "NameNode"
+)
- v, ok := jvm["tag.ProcessName"]
- if !ok {
- return "", errors.New("couldn't find process name in JvmMetrics")
- }
+func (h *HDFS) Configuration() any {
+ return h.Config
+}
- t := nodeType(strings.Trim(string(v), "\""))
- if t == nameNodeType || t == dataNodeType {
- return t, nil
+func (h *HDFS) Init() error {
+ if err := h.validateConfig(); err != nil {
+ h.Errorf("config validation: %v", err)
+ return err
}
- return "", errors.New("unknown node type")
-}
-// Init makes initialization.
-func (h *HDFS) Init() bool {
cl, err := h.createClient()
if err != nil {
h.Errorf("error on creating client : %v", err)
- return false
+ return err
}
h.client = cl
- return true
+ return nil
}
-// Check makes check.
-func (h *HDFS) Check() bool {
- t, err := h.determineNodeType()
+func (h *HDFS) Check() error {
+ typ, err := h.determineNodeType()
if err != nil {
h.Errorf("error on node type determination : %v", err)
- return false
+ return err
}
- h.nodeType = t
+ h.nodeType = typ
- return len(h.Collect()) > 0
+ mx, err := h.collect()
+ if err != nil {
+ h.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts returns Charts.
-func (h HDFS) Charts() *Charts {
+func (h *HDFS) Charts() *Charts {
switch h.nodeType {
default:
return nil
@@ -137,7 +110,6 @@ func (h HDFS) Charts() *Charts {
}
}
-// Collect collects metrics.
func (h *HDFS) Collect() map[string]int64 {
mx, err := h.collect()
@@ -151,3 +123,9 @@ func (h *HDFS) Collect() map[string]int64 {
return mx
}
+
+func (h *HDFS) Cleanup() {
+ if h.client != nil && h.client.httpClient != nil {
+ h.client.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/modules/hdfs/hdfs_test.go b/modules/hdfs/hdfs_test.go
index dc5b7cf0e..17351304f 100644
--- a/modules/hdfs/hdfs_test.go
+++ b/modules/hdfs/hdfs_test.go
@@ -9,52 +9,62 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testUnknownNodeData, _ = os.ReadFile("testdata/unknownnode.json")
- testDataNodeData, _ = os.ReadFile("testdata/datanode.json")
- testNameNodeData, _ = os.ReadFile("testdata/namenode.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataUnknownNodeMetrics, _ = os.ReadFile("testdata/unknownnode.json")
+ dataDataNodeMetrics, _ = os.ReadFile("testdata/datanode.json")
+ dataNameNodeMetrics, _ = os.ReadFile("testdata/namenode.json")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, testUnknownNodeData)
- assert.NotNil(t, testDataNodeData)
- assert.NotNil(t, testNameNodeData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataUnknownNodeMetrics": dataUnknownNodeMetrics,
+ "dataDataNodeMetrics": dataDataNodeMetrics,
+ "dataNameNodeMetrics": dataNameNodeMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestHDFS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &HDFS{}, dataConfigJSON, dataConfigYAML)
}
func TestHDFS_Init(t *testing.T) {
job := New()
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
}
func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) {
job := New()
job.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestHDFS_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testNameNodeData)
+ _, _ = w.Write(dataNameNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
assert.NotZero(t, job.nodeType)
}
@@ -62,15 +72,15 @@ func TestHDFS_CheckDataNode(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testDataNodeData)
+ _, _ = w.Write(dataDataNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
assert.Equal(t, dataNodeType, job.nodeType)
}
@@ -78,15 +88,15 @@ func TestHDFS_CheckNameNode(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testNameNodeData)
+ _, _ = w.Write(dataNameNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
assert.Equal(t, nameNodeType, job.nodeType)
}
@@ -94,23 +104,23 @@ func TestHDFS_CheckErrorOnNodeTypeDetermination(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testUnknownNodeData)
+ _, _ = w.Write(dataUnknownNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- assert.False(t, job.Check())
+ assert.Error(t, job.Check())
}
func TestHDFS_CheckNoResponse(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/jmx"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- assert.False(t, job.Check())
+ assert.Error(t, job.Check())
}
func TestHDFS_Charts(t *testing.T) {
@@ -145,14 +155,14 @@ func TestHDFS_CollectDataNode(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testDataNodeData)
+ _, _ = w.Write(dataDataNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"dna_bytes_read": 80689178,
@@ -197,14 +207,14 @@ func TestHDFS_CollectNameNode(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testNameNodeData)
+ _, _ = w.Write(dataNameNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"fsns_blocks_total": 15,
@@ -256,13 +266,13 @@ func TestHDFS_CollectUnknownNode(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testUnknownNodeData)
+ _, _ = w.Write(dataUnknownNodeMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.Panics(t, func() { _ = job.Collect() })
}
@@ -270,7 +280,7 @@ func TestHDFS_CollectUnknownNode(t *testing.T) {
func TestHDFS_CollectNoResponse(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/jmx"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.Nil(t, job.Collect())
}
@@ -285,7 +295,7 @@ func TestHDFS_CollectReceiveInvalidResponse(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.Nil(t, job.Collect())
}
@@ -300,7 +310,7 @@ func TestHDFS_CollectReceive404(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.Nil(t, job.Collect())
}
diff --git a/modules/hdfs/init.go b/modules/hdfs/init.go
new file mode 100644
index 000000000..2fbcfc32d
--- /dev/null
+++ b/modules/hdfs/init.go
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package hdfs
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (h *HDFS) validateConfig() error {
+ if h.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (h *HDFS) createClient() (*client, error) {
+ httpClient, err := web.NewHTTPClient(h.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return newClient(httpClient, h.Request), nil
+}
diff --git a/modules/hdfs/raw_data.go b/modules/hdfs/raw_data.go
new file mode 100644
index 000000000..ab434ae17
--- /dev/null
+++ b/modules/hdfs/raw_data.go
@@ -0,0 +1,51 @@
+package hdfs
+
+import (
+ "encoding/json"
+ "strings"
+)
+
+type (
+ rawData map[string]json.RawMessage
+ rawJMX struct {
+ Beans []rawData
+ }
+)
+
+func (r rawJMX) isEmpty() bool {
+ return len(r.Beans) == 0
+}
+
+func (r rawJMX) find(f func(rawData) bool) rawData {
+ for _, v := range r.Beans {
+ if f(v) {
+ return v
+ }
+ }
+ return nil
+}
+
+func (r rawJMX) findJvm() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findRPCActivity() rawData {
+ f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") }
+ return r.find(f)
+}
+
+func (r rawJMX) findFSNameSystem() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findFSDatasetState() rawData {
+ f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" }
+ return r.find(f)
+}
+
+func (r rawJMX) findDataNodeActivity() rawData {
+ f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") }
+ return r.find(f)
+}
diff --git a/modules/hdfs/testdata/config.json b/modules/hdfs/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/hdfs/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/hdfs/testdata/config.yaml b/modules/hdfs/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/hdfs/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/httpcheck/config_schema.json b/modules/httpcheck/config_schema.json
index d344853f7..933bbf068 100644
--- a/modules/httpcheck/config_schema.json
+++ b/modules/httpcheck/config_schema.json
@@ -1,71 +1,202 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/httpcheck job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "accepted_statuses": {
- "type": "array",
- "items": {
- "type": "integer"
- }
- },
- "response_match": {
- "type": "string"
- },
- "cookie_file": {
- "type": "string"
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "HTTPCheck collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the HTTP endpoint.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "accepted_statuses": {
+ "title": "Status check",
+ "description": "Specifies the list of HTTP response status codes that are considered acceptable. Responses with status codes not included in this list will be categorized as 'bad status' in the status chart.",
+ "type": "array",
+ "items": {
+ "type": "integer",
+ "minimum": 100
+ },
+ "minItems": 1,
+ "uniqueItems": true,
+ "default": [
+ 200
+ ]
+ },
+ "response_match": {
+ "title": "Content check",
+ "description": "Specifies a regular expression pattern to match against the content of the HTTP response. This check is performed only if the response's status code is accepted.",
+ "type": "string"
+ },
+ "header_match": {
+ "title": "Header check",
+ "description": "Specifies a set of rules to check for specific key-value pairs in the HTTP headers of the response.",
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "exclude": {
+ "title": "Exclude",
+ "description": "Determines whether the rule checks for the presence or absence of the specified key-value pair in the HTTP headers.",
+ "type": "boolean"
+ },
+ "key": {
+ "title": "Header key",
+ "description": "Specifies the exact name of the HTTP header to check for.",
+ "type": "string"
+ },
+ "value": {
+ "title": "Header value pattern",
+ "description": "Specifies the matcher pattern to match against the value of the specified header.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "key",
+ "value"
+ ]
+ }
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
+ "required": [
+ "url",
+ "accepted_statuses"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Checks",
+ "fields": [
+ "accepted_statuses",
+ "response_match",
+ "header_match"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_key": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/httpcheck/httpcheck.go b/modules/httpcheck/httpcheck.go
index abb2c821e..7945e86d1 100644
--- a/modules/httpcheck/httpcheck.go
+++ b/modules/httpcheck/httpcheck.go
@@ -4,13 +4,13 @@ package httpcheck
import (
_ "embed"
+ "errors"
"net/http"
"regexp"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -31,52 +31,56 @@ func New() *HTTPCheck {
Config: Config{
HTTP: web.HTTP{
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
AcceptedStatuses: []int{200},
},
+
acceptedStatuses: make(map[int]bool),
}
}
type (
Config struct {
- web.HTTP `yaml:",inline"`
- UpdateEvery int `yaml:"update_every"`
- AcceptedStatuses []int `yaml:"status_accepted"`
- ResponseMatch string `yaml:"response_match"`
- CookieFile string `yaml:"cookie_file"`
- HeaderMatch []HeaderMatchConfig `yaml:"header_match"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ AcceptedStatuses []int `yaml:"status_accepted" json:"status_accepted"`
+ ResponseMatch string `yaml:"response_match" json:"response_match"`
+ CookieFile string `yaml:"cookie_file" json:"cookie_file"`
+ HeaderMatch []headerMatchConfig `yaml:"header_match" json:"header_match"`
}
- HeaderMatchConfig struct {
- Exclude bool `yaml:"exclude"`
- Key string `yaml:"key"`
- Value string `yaml:"value"`
+ headerMatchConfig struct {
+ Exclude bool `yaml:"exclude" json:"exclude"`
+ Key string `yaml:"key" json:"key"`
+ Value string `yaml:"value" json:"value"`
}
)
type HTTPCheck struct {
module.Base
- Config `yaml:",inline"`
-
- httpClient *http.Client
+ Config `yaml:",inline" json:""`
charts *module.Charts
- acceptedStatuses map[int]bool
- reResponse *regexp.Regexp
- headerMatch []headerMatch
+ httpClient *http.Client
+ acceptedStatuses map[int]bool
+ reResponse *regexp.Regexp
+ headerMatch []headerMatch
cookieFileModTime time.Time
metrics metrics
}
-func (hc *HTTPCheck) Init() bool {
+func (hc *HTTPCheck) Configuration() any {
+ return hc.Config
+}
+
+func (hc *HTTPCheck) Init() error {
if err := hc.validateConfig(); err != nil {
hc.Errorf("config validation: %v", err)
- return false
+ return err
}
hc.charts = hc.initCharts()
@@ -84,21 +88,21 @@ func (hc *HTTPCheck) Init() bool {
httpClient, err := hc.initHTTPClient()
if err != nil {
hc.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
hc.httpClient = httpClient
re, err := hc.initResponseMatchRegexp()
if err != nil {
hc.Errorf("init response match regexp: %v", err)
- return false
+ return err
}
hc.reResponse = re
hm, err := hc.initHeaderMatch()
if err != nil {
hc.Errorf("init header match: %v", err)
- return false
+ return err
}
hc.headerMatch = hm
@@ -107,17 +111,25 @@ func (hc *HTTPCheck) Init() bool {
}
hc.Debugf("using URL %s", hc.URL)
- hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration)
+ hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration())
hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses)
if hc.reResponse != nil {
hc.Debugf("using response match regexp %s", hc.reResponse)
}
- return true
+ return nil
}
-func (hc *HTTPCheck) Check() bool {
- return len(hc.Collect()) > 0
+func (hc *HTTPCheck) Check() error {
+ mx, err := hc.collect()
+ if err != nil {
+ hc.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (hc *HTTPCheck) Charts() *module.Charts {
diff --git a/modules/httpcheck/httpcheck_test.go b/modules/httpcheck/httpcheck_test.go
index 9d866e093..bc398da14 100644
--- a/modules/httpcheck/httpcheck_test.go
+++ b/modules/httpcheck/httpcheck_test.go
@@ -3,8 +3,10 @@
package httpcheck
import (
+ "github.com/netdata/go.d.plugin/agent/module"
"net/http"
"net/http/httptest"
+ "os"
"testing"
"time"
@@ -14,6 +16,24 @@ import (
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestHTTPCheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &HTTPCheck{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestHTTPCheck_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -56,9 +76,9 @@ func TestHTTPCheck_Init(t *testing.T) {
httpCheck.Config = test.config
if test.wantFail {
- assert.False(t, httpCheck.Init())
+ assert.Error(t, httpCheck.Init())
} else {
- assert.True(t, httpCheck.Init())
+ assert.NoError(t, httpCheck.Init())
}
})
}
@@ -80,7 +100,7 @@ func TestHTTPCheck_Charts(t *testing.T) {
prepare: func(t *testing.T) *HTTPCheck {
httpCheck := New()
httpCheck.URL = "http://127.0.0.1:38001"
- require.True(t, httpCheck.Init())
+ require.NoError(t, httpCheck.Init())
return httpCheck
},
@@ -105,7 +125,7 @@ func TestHTTPCheck_Cleanup(t *testing.T) {
assert.NotPanics(t, httpCheck.Cleanup)
httpCheck.URL = "http://127.0.0.1:38001"
- require.True(t, httpCheck.Init())
+ require.NoError(t, httpCheck.Init())
assert.NotPanics(t, httpCheck.Cleanup)
}
@@ -129,12 +149,12 @@ func TestHTTPCheck_Check(t *testing.T) {
httpCheck, cleanup := test.prepare()
defer cleanup()
- require.True(t, httpCheck.Init())
+ require.NoError(t, httpCheck.Init())
if test.wantFail {
- assert.False(t, httpCheck.Check())
+ assert.Error(t, httpCheck.Check())
} else {
- assert.True(t, httpCheck.Check())
+ assert.NoError(t, httpCheck.Check())
}
})
}
@@ -255,7 +275,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match include no value success case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Key: "header-key2"},
}
},
@@ -275,7 +295,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match include with value success case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Key: "header-key2", Value: "= header-value"},
}
},
@@ -295,7 +315,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match include no value bad headers case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Key: "header-key99"},
}
},
@@ -315,7 +335,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match include with value bad headers case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Key: "header-key2", Value: "= header-value99"},
}
},
@@ -335,7 +355,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match exclude no value success case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Exclude: true, Key: "header-key99"},
}
},
@@ -355,7 +375,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match exclude with value success case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Exclude: true, Key: "header-key2", Value: "= header-value99"},
}
},
@@ -375,7 +395,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match exclude no value bad headers case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Exclude: true, Key: "header-key2"},
}
},
@@ -395,7 +415,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
"header match exclude with value bad headers case": {
prepare: prepareSuccessCase,
update: func(httpCheck *HTTPCheck) {
- httpCheck.HeaderMatch = []HeaderMatchConfig{
+ httpCheck.HeaderMatch = []headerMatchConfig{
{Exclude: true, Key: "header-key2", Value: "= header-value"},
}
},
@@ -438,7 +458,7 @@ func TestHTTPCheck_Collect(t *testing.T) {
test.update(httpCheck)
}
- require.True(t, httpCheck.Init())
+ require.NoError(t, httpCheck.Init())
var mx map[string]int64
@@ -475,11 +495,11 @@ func prepareSuccessCase() (*HTTPCheck, func()) {
func prepareTimeoutCase() (*HTTPCheck, func()) {
httpCheck := New()
httpCheck.UpdateEvery = 1
- httpCheck.Timeout.Duration = time.Millisecond * 100
+ httpCheck.Timeout = web.Duration(time.Millisecond * 100)
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- time.Sleep(httpCheck.Timeout.Duration + time.Millisecond*100)
+ time.Sleep(httpCheck.Timeout.Duration() + time.Millisecond*100)
}))
httpCheck.URL = srv.URL
diff --git a/modules/httpcheck/metadata.yaml b/modules/httpcheck/metadata.yaml
index 65833f5aa..9b919b0f9 100644
--- a/modules/httpcheck/metadata.yaml
+++ b/modules/httpcheck/metadata.yaml
@@ -51,7 +51,7 @@ modules:
list:
- name: update_every
description: Data collection frequency.
- default_value: 1
+ default_value: 5
required: false
- name: autodetection_retry
description: Recheck interval in seconds. Zero means no recheck will be scheduled.
diff --git a/modules/httpcheck/testdata/config.json b/modules/httpcheck/testdata/config.json
new file mode 100644
index 000000000..649393cdd
--- /dev/null
+++ b/modules/httpcheck/testdata/config.json
@@ -0,0 +1,32 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "status_accepted": [
+ 123
+ ],
+ "response_match": "ok",
+ "cookie_file": "ok",
+ "header_match": [
+ {
+ "exclude": true,
+ "key": "ok",
+ "value": "ok"
+ }
+ ]
+}
diff --git a/modules/httpcheck/testdata/config.yaml b/modules/httpcheck/testdata/config.yaml
new file mode 100644
index 000000000..1a66590e6
--- /dev/null
+++ b/modules/httpcheck/testdata/config.yaml
@@ -0,0 +1,25 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+status_accepted:
+ - 123
+response_match: "ok"
+cookie_file: "ok"
+header_match:
+ - exclude: yes
+ key: "ok"
+ value: "ok"
diff --git a/modules/init.go b/modules/init.go
index 9e44cf98a..5c059975b 100644
--- a/modules/init.go
+++ b/modules/init.go
@@ -21,7 +21,6 @@ import (
_ "github.com/netdata/go.d.plugin/modules/docker_engine"
_ "github.com/netdata/go.d.plugin/modules/dockerhub"
_ "github.com/netdata/go.d.plugin/modules/elasticsearch"
- _ "github.com/netdata/go.d.plugin/modules/energid"
_ "github.com/netdata/go.d.plugin/modules/envoy"
_ "github.com/netdata/go.d.plugin/modules/example"
_ "github.com/netdata/go.d.plugin/modules/filecheck"
@@ -65,8 +64,6 @@ import (
_ "github.com/netdata/go.d.plugin/modules/redis"
_ "github.com/netdata/go.d.plugin/modules/scaleio"
_ "github.com/netdata/go.d.plugin/modules/snmp"
- _ "github.com/netdata/go.d.plugin/modules/solr"
- _ "github.com/netdata/go.d.plugin/modules/springboot2"
_ "github.com/netdata/go.d.plugin/modules/squidlog"
_ "github.com/netdata/go.d.plugin/modules/supervisord"
_ "github.com/netdata/go.d.plugin/modules/systemdunits"
diff --git a/modules/isc_dhcpd/config_schema.json b/modules/isc_dhcpd/config_schema.json
index ed860cbeb..3151b936a 100644
--- a/modules/isc_dhcpd/config_schema.json
+++ b/modules/isc_dhcpd/config_schema.json
@@ -1,36 +1,59 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/isc_dhcpd job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "leases_path": {
- "type": "string"
- },
- "pools": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ISC DHCP collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "leases_path": {
+ "title": "Leases path",
+ "description": "Specifies the file path to the ISC DHCP client lease database.",
+ "type": "string",
+ "default": "/var/lib/dhcp/dhcpd.leases"
+ },
+ "pools": {
+ "title": "IP pools",
+ "description": "Specifies a list of IP pools to monitor.",
+ "type": "array",
+ "items": {
+ "title": "IP pool",
+ "type": "object",
+ "properties": {
+ "name": {
+ "title": "Name",
+ "description": "Specifies a descriptive name for the IP pool.",
+ "type": "string"
+ },
+ "networks": {
+ "title": "Networks",
+ "description": "Specifies the IP ranges to monitor, separated by spaces. Supported formats: IP-IP, IP/mask.",
+ "type": "string"
+ }
},
- "networks": {
- "type": "string"
- }
+ "required": [
+ "name",
+ "networks"
+ ]
},
- "required": [
- "name",
- "networks"
- ]
+ "minItems": 1,
+ "uniqueItems": true,
+ "additionalItems": false
}
- }
+ },
+ "required": [
+ "leases_path",
+ "pools"
+ ]
},
- "required": [
- "name",
- "leases_path",
- "pools"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
}
diff --git a/modules/isc_dhcpd/init.go b/modules/isc_dhcpd/init.go
index 847a4590b..de26499b0 100644
--- a/modules/isc_dhcpd/init.go
+++ b/modules/isc_dhcpd/init.go
@@ -15,7 +15,7 @@ type ipPool struct {
addresses iprange.Pool
}
-func (d DHCPd) validateConfig() error {
+func (d *DHCPd) validateConfig() error {
if d.Config.LeasesPath == "" {
return errors.New("'lease_path' parameter not set")
}
@@ -33,7 +33,7 @@ func (d DHCPd) validateConfig() error {
return nil
}
-func (d DHCPd) initPools() ([]ipPool, error) {
+func (d *DHCPd) initPools() ([]ipPool, error) {
var pools []ipPool
for i, cfg := range d.Pools {
rs, err := iprange.ParseRanges(cfg.Networks)
@@ -50,7 +50,7 @@ func (d DHCPd) initPools() ([]ipPool, error) {
return pools, nil
}
-func (d DHCPd) initCharts(pools []ipPool) (*module.Charts, error) {
+func (d *DHCPd) initCharts(pools []ipPool) (*module.Charts, error) {
charts := &module.Charts{}
if err := charts.Add(activeLeasesTotalChart.Copy()); err != nil {
diff --git a/modules/isc_dhcpd/isc_dhcpd.go b/modules/isc_dhcpd/isc_dhcpd.go
index e1f4e5764..aeb3d8c0d 100644
--- a/modules/isc_dhcpd/isc_dhcpd.go
+++ b/modules/isc_dhcpd/isc_dhcpd.go
@@ -4,6 +4,7 @@ package isc_dhcpd
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -22,67 +23,80 @@ func init() {
})
}
+func New() *DHCPd {
+ return &DHCPd{
+ Config: Config{
+ LeasesPath: "/var/lib/dhcp/dhcpd.leases",
+ },
+
+ collected: make(map[string]int64),
+ }
+}
+
type (
Config struct {
- LeasesPath string `yaml:"leases_path"`
- Pools []PoolConfig `yaml:"pools"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ LeasesPath string `yaml:"leases_path" json:"leases_path"`
+ Pools []PoolConfig `yaml:"pools" json:"pools"`
}
PoolConfig struct {
- Name string `yaml:"name"`
- Networks string `yaml:"networks"`
+ Name string `yaml:"name" json:"name"`
+ Networks string `yaml:"networks" json:"networks"`
}
)
type DHCPd struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
- charts *module.Charts
pools []ipPool
leasesModTime time.Time
collected map[string]int64
}
-func New() *DHCPd {
- return &DHCPd{
- Config: Config{
- LeasesPath: "/var/lib/dhcp/dhcpd.leases",
- },
-
- collected: make(map[string]int64),
- }
+func (d *DHCPd) Configuration() any {
+ return d.Config
}
-func (DHCPd) Cleanup() {}
-
-func (d *DHCPd) Init() bool {
+func (d *DHCPd) Init() error {
err := d.validateConfig()
if err != nil {
d.Errorf("config validation: %v", err)
- return false
+ return err
}
pools, err := d.initPools()
if err != nil {
d.Errorf("ip pools init: %v", err)
- return false
+ return err
}
d.pools = pools
charts, err := d.initCharts(pools)
if err != nil {
d.Errorf("charts init: %v", err)
- return false
+ return err
}
d.charts = charts
d.Debugf("monitoring leases file: %v", d.Config.LeasesPath)
d.Debugf("monitoring ip pools: %v", d.Config.Pools)
- return true
+
+ return nil
}
-func (d *DHCPd) Check() bool {
- return len(d.Collect()) > 0
+func (d *DHCPd) Check() error {
+ mx, err := d.collect()
+ if err != nil {
+ d.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (d *DHCPd) Charts() *module.Charts {
@@ -101,3 +115,5 @@ func (d *DHCPd) Collect() map[string]int64 {
return mx
}
+
+func (d *DHCPd) Cleanup() {}
diff --git a/modules/isc_dhcpd/isc_dhcpd_test.go b/modules/isc_dhcpd/isc_dhcpd_test.go
index 72980e469..c1d359306 100644
--- a/modules/isc_dhcpd/isc_dhcpd_test.go
+++ b/modules/isc_dhcpd/isc_dhcpd_test.go
@@ -3,6 +3,7 @@
package isc_dhcpd
import (
+ "os"
"testing"
"github.com/netdata/go.d.plugin/agent/module"
@@ -11,8 +12,22 @@ import (
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestDHCPd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &DHCPd{}, dataConfigJSON, dataConfigYAML)
}
func TestDHCPd_Cleanup(t *testing.T) {
@@ -67,9 +82,9 @@ func TestDHCPd_Init(t *testing.T) {
dhcpd.Config = test.config
if test.wantFail {
- assert.False(t, dhcpd.Init())
+ assert.Error(t, dhcpd.Init())
} else {
- assert.True(t, dhcpd.Init())
+ assert.NoError(t, dhcpd.Init())
}
})
}
@@ -91,12 +106,12 @@ func TestDHCPd_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
dhcpd := test.prepare()
- require.True(t, dhcpd.Init())
+ require.NoError(t, dhcpd.Init())
if test.wantFail {
- assert.False(t, dhcpd.Check())
+ assert.Error(t, dhcpd.Check())
} else {
- assert.True(t, dhcpd.Check())
+ assert.NoError(t, dhcpd.Check())
}
})
}
@@ -108,7 +123,7 @@ func TestDHCPd_Charts(t *testing.T) {
dhcpd.Pools = []PoolConfig{
{Name: "name", Networks: "192.0.2.0/24"},
}
- require.True(t, dhcpd.Init())
+ require.NoError(t, dhcpd.Init())
assert.NotNil(t, dhcpd.Charts())
}
@@ -209,7 +224,7 @@ func TestDHCPd_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
dhcpd := test.prepare()
- require.True(t, dhcpd.Init())
+ require.NoError(t, dhcpd.Init())
collected := dhcpd.Collect()
diff --git a/modules/isc_dhcpd/testdata/config.json b/modules/isc_dhcpd/testdata/config.json
new file mode 100644
index 000000000..945f8865e
--- /dev/null
+++ b/modules/isc_dhcpd/testdata/config.json
@@ -0,0 +1,10 @@
+{
+ "update_every": 123,
+ "leases_path": "ok",
+ "pools": [
+ {
+ "name": "ok",
+ "networks": "ok"
+ }
+ ]
+}
diff --git a/modules/isc_dhcpd/testdata/config.yaml b/modules/isc_dhcpd/testdata/config.yaml
new file mode 100644
index 000000000..a33defc55
--- /dev/null
+++ b/modules/isc_dhcpd/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+leases_path: "ok"
+pools:
+ - name: "ok"
+ networks: "ok"
diff --git a/modules/k8s_kubelet/config_schema.json b/modules/k8s_kubelet/config_schema.json
index 6e42187f2..98b42e9f0 100644
--- a/modules/k8s_kubelet/config_schema.json
+++ b/modules/k8s_kubelet/config_schema.json
@@ -1,62 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/k8s_kubelet job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubelet collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Kubelet metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:10255/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "token_path": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/k8s_kubelet/init.go b/modules/k8s_kubelet/init.go
new file mode 100644
index 000000000..f9fcda8ce
--- /dev/null
+++ b/modules/k8s_kubelet/init.go
@@ -0,0 +1,35 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubelet
+
+import (
+ "errors"
+ "os"
+
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (k *Kubelet) validateConfig() error {
+ if k.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (k *Kubelet) initAuthToken() string {
+ bs, err := os.ReadFile(k.TokenPath)
+ if err != nil {
+ k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err)
+ }
+ return string(bs)
+}
+
+func (k *Kubelet) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(k.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(httpClient, k.Request), nil
+}
diff --git a/modules/k8s_kubelet/kubelet.go b/modules/k8s_kubelet/kubelet.go
index 7f62c9f30..9ad1b58d6 100644
--- a/modules/k8s_kubelet/kubelet.go
+++ b/modules/k8s_kubelet/kubelet.go
@@ -4,13 +4,12 @@ package k8s_kubelet
import (
_ "embed"
- "os"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -27,78 +26,83 @@ func init() {
})
}
-// New creates Kubelet with default values.
func New() *Kubelet {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:10255/metrics",
- Headers: make(map[string]string),
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &Kubelet{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:10255/metrics",
+ Headers: make(map[string]string),
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
+ TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token",
},
- TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token",
- }
- return &Kubelet{
- Config: config,
charts: charts.Copy(),
collectedVMPlugins: make(map[string]bool),
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- TokenPath string `yaml:"token_path"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ TokenPath string `yaml:"token_path" json:"token_path"`
+}
- Kubelet struct {
- module.Base
- Config `yaml:",inline"`
+type Kubelet struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- prom prometheus.Prometheus
- charts *Charts
- // volume_manager_total_volumes
- collectedVMPlugins map[string]bool
- }
-)
+ charts *Charts
-// Cleanup makes cleanup.
-func (Kubelet) Cleanup() {}
+ prom prometheus.Prometheus
-// Init makes initialization.
-func (k *Kubelet) Init() bool {
- b, err := os.ReadFile(k.TokenPath)
- if err != nil {
- k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err)
- } else {
- k.Request.Headers["Authorization"] = "Bearer " + string(b)
+ collectedVMPlugins map[string]bool // volume_manager_total_volumes
+}
+
+func (k *Kubelet) Configuration() any {
+ return k.Config
+}
+
+func (k *Kubelet) Init() error {
+ if err := k.validateConfig(); err != nil {
+ k.Errorf("config validation: %v", err)
+ return err
}
- client, err := web.NewHTTPClient(k.Client)
+ prom, err := k.initPrometheusClient()
if err != nil {
- k.Errorf("error on creating http client: %v", err)
- return false
+ k.Error(err)
+ return err
}
+ k.prom = prom
- k.prom = prometheus.New(client, k.Request)
- return true
+ if tok := k.initAuthToken(); tok != "" {
+ k.Request.Headers["Authorization"] = "Bearer " + tok
+ }
+
+ return nil
}
-// Check makes check.
-func (k *Kubelet) Check() bool {
- return len(k.Collect()) > 0
+func (k *Kubelet) Check() error {
+ mx, err := k.collect()
+ if err != nil {
+ k.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts creates Charts.
-func (k Kubelet) Charts() *Charts {
+func (k *Kubelet) Charts() *Charts {
return k.charts
}
-// Collect collects mx.
func (k *Kubelet) Collect() map[string]int64 {
mx, err := k.collect()
@@ -109,3 +113,9 @@ func (k *Kubelet) Collect() map[string]int64 {
return mx
}
+
+func (k *Kubelet) Cleanup() {
+ if k.prom != nil && k.prom.HTTPClient() != nil {
+ k.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/k8s_kubelet/kubelet_test.go b/modules/k8s_kubelet/kubelet_test.go
index a69a0724b..aa0a38fba 100644
--- a/modules/k8s_kubelet/kubelet_test.go
+++ b/modules/k8s_kubelet/kubelet_test.go
@@ -8,24 +8,33 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testMetricsData, _ = os.ReadFile("testdata/metrics.txt")
- testTokenData, _ = os.ReadFile("testdata/token.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMetrics, _ = os.ReadFile("testdata/metrics.txt")
+ dataServiceAccountToken, _ = os.ReadFile("testdata/token.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, testMetricsData)
- assert.NotNil(t, testTokenData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMetrics": dataMetrics,
+ "dataServiceAccountToken": dataServiceAccountToken,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- job := New()
-
- assert.IsType(t, (*Kubelet)(nil), job)
+func TestKubelet_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Kubelet{}, dataConfigJSON, dataConfigYAML)
}
func TestKubelet_Charts(t *testing.T) {
@@ -37,57 +46,57 @@ func TestKubelet_Cleanup(t *testing.T) {
}
func TestKubelet_Init(t *testing.T) {
- assert.True(t, New().Init())
+ assert.NoError(t, New().Init())
}
func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) {
job := New()
job.TokenPath = "testdata/token.txt"
- assert.True(t, job.Init())
- assert.Equal(t, "Bearer "+string(testTokenData), job.Request.Headers["Authorization"])
+ assert.NoError(t, job.Init())
+ assert.Equal(t, "Bearer "+string(dataServiceAccountToken), job.Request.Headers["Authorization"])
}
func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) {
job := New()
job.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestKubelet_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testMetricsData)
+ _, _ = w.Write(dataMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestKubelet_Check_ConnectionRefused(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestKubelet_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testMetricsData)
+ _, _ = w.Write(dataMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"apiserver_audit_requests_rejected_total": 0,
@@ -185,8 +194,8 @@ func TestKubelet_Collect_ReceiveInvalidResponse(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestKubelet_Collect_Receive404(t *testing.T) {
@@ -199,6 +208,6 @@ func TestKubelet_Collect_Receive404(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/k8s_kubelet/testdata/config.json b/modules/k8s_kubelet/testdata/config.json
new file mode 100644
index 000000000..d85483953
--- /dev/null
+++ b/modules/k8s_kubelet/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "token_path": "ok"
+}
diff --git a/modules/k8s_kubelet/testdata/config.yaml b/modules/k8s_kubelet/testdata/config.yaml
new file mode 100644
index 000000000..f10534111
--- /dev/null
+++ b/modules/k8s_kubelet/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+token_path: "ok"
\ No newline at end of file
diff --git a/modules/k8s_kubeproxy/config_schema.json b/modules/k8s_kubeproxy/config_schema.json
index c26231397..b7f93d65c 100644
--- a/modules/k8s_kubeproxy/config_schema.json
+++ b/modules/k8s_kubeproxy/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/k8s_kubeproxy job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubeproxy collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "Kubeproxy metrics endpoint URL.",
+ "type": "string",
+ "default": "http://127.0.0.1:10249/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/k8s_kubeproxy/init.go b/modules/k8s_kubeproxy/init.go
new file mode 100644
index 000000000..39b46d353
--- /dev/null
+++ b/modules/k8s_kubeproxy/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package k8s_kubeproxy
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (kp *KubeProxy) validateConfig() error {
+ if kp.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (kp *KubeProxy) initPrometheusClient() (prometheus.Prometheus, error) {
+ httpClient, err := web.NewHTTPClient(kp.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(httpClient, kp.Request), nil
+}
diff --git a/modules/k8s_kubeproxy/kubeproxy.go b/modules/k8s_kubeproxy/kubeproxy.go
index a681619c4..530a0e74e 100644
--- a/modules/k8s_kubeproxy/kubeproxy.go
+++ b/modules/k8s_kubeproxy/kubeproxy.go
@@ -4,17 +4,12 @@ package k8s_kubeproxy
import (
_ "embed"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-const (
- defaultURL = "http://127.0.0.1:10249/metrics"
- defaultHTTPTimeout = time.Second * 2
)
//go:embed "config_schema.json"
@@ -31,70 +26,72 @@ func init() {
})
}
-// New creates KubeProxy with default values.
func New() *KubeProxy {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &KubeProxy{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:10249/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
- return &KubeProxy{
- Config: config,
charts: charts.Copy(),
}
}
-// Config is the KubeProxy module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
-// KubeProxy is KubeProxy module.
type KubeProxy struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- prom prometheus.Prometheus
charts *Charts
+
+ prom prometheus.Prometheus
}
-// Cleanup makes cleanup.
-func (KubeProxy) Cleanup() {}
+func (kp *KubeProxy) Configuration() any {
+ return kp.Config
+}
-// Init makes initialization.
-func (kp *KubeProxy) Init() bool {
- if kp.URL == "" {
- kp.Error("URL not set")
- return false
+func (kp *KubeProxy) Init() error {
+ if err := kp.validateConfig(); err != nil {
+ kp.Errorf("config validation: %v", err)
+ return err
}
- client, err := web.NewHTTPClient(kp.Client)
+ prom, err := kp.initPrometheusClient()
if err != nil {
- kp.Errorf("error on creating http client : %v", err)
- return false
+ kp.Error(err)
+ return err
}
+ kp.prom = prom
- kp.prom = prometheus.New(client, kp.Request)
-
- return true
+ return nil
}
-// Check makes check.
-func (kp *KubeProxy) Check() bool {
- return len(kp.Collect()) > 0
+func (kp *KubeProxy) Check() error {
+ mx, err := kp.collect()
+ if err != nil {
+ kp.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts creates Charts.
-func (kp KubeProxy) Charts() *Charts {
+func (kp *KubeProxy) Charts() *Charts {
return kp.charts
}
-// Collect collects metrics.
func (kp *KubeProxy) Collect() map[string]int64 {
mx, err := kp.collect()
@@ -105,3 +102,9 @@ func (kp *KubeProxy) Collect() map[string]int64 {
return mx
}
+
+func (kp *KubeProxy) Cleanup() {
+ if kp.prom != nil && kp.prom.HTTPClient() != nil {
+ kp.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/k8s_kubeproxy/kubeproxy_test.go b/modules/k8s_kubeproxy/kubeproxy_test.go
index 4c1831a99..8f1f811e9 100644
--- a/modules/k8s_kubeproxy/kubeproxy_test.go
+++ b/modules/k8s_kubeproxy/kubeproxy_test.go
@@ -8,65 +8,84 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-var testMetrics, _ = os.ReadFile("testdata/metrics.txt")
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-func TestNew(t *testing.T) {
- job := New()
+ dataMetrics, _ = os.ReadFile("testdata/metrics.txt")
+)
- assert.IsType(t, (*KubeProxy)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMetrics": dataMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestKubeProxy_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+func TestKubeProxy_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &KubeProxy{}, dataConfigJSON, dataConfigYAML)
+}
-func TestKubeProxy_Cleanup(t *testing.T) { New().Cleanup() }
+func TestKubeProxy_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
+
+func TestKubeProxy_Cleanup(t *testing.T) {
+ New().Cleanup()
+}
-func TestKubeProxy_Init(t *testing.T) { assert.True(t, New().Init()) }
+func TestKubeProxy_Init(t *testing.T) {
+ assert.NoError(t, New().Init())
+}
func TestKubeProxy_InitNG(t *testing.T) {
job := New()
job.URL = ""
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestKubeProxy_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testMetrics)
+ _, _ = w.Write(dataMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestKubeProxy_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestKubeProxy_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testMetrics)
+ _, _ = w.Write(dataMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"sync_proxy_rules_count": 2669,
@@ -108,8 +127,8 @@ func TestKubeProxy_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestKubeProxy_404(t *testing.T) {
@@ -122,6 +141,6 @@ func TestKubeProxy_404(t *testing.T) {
job := New()
job.URL = ts.URL + "/metrics"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/k8s_kubeproxy/testdata/config.json b/modules/k8s_kubeproxy/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/k8s_kubeproxy/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/k8s_kubeproxy/testdata/config.yaml b/modules/k8s_kubeproxy/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/k8s_kubeproxy/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/k8s_state/config_schema.json b/modules/k8s_state/config_schema.json
index 42b6b0fd6..435b0748b 100644
--- a/modules/k8s_state/config_schema.json
+++ b/modules/k8s_state/config_schema.json
@@ -1,13 +1,21 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/k8s_state job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Kubernetes Cluster State collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update Every",
+ "description": "The data collection frequency in seconds.",
+ "minimum": 1,
+ "default": 1,
+ "type": "integer"
+ }
}
},
- "required": [
- "name"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
}
diff --git a/modules/k8s_state/kube_state.go b/modules/k8s_state/kube_state.go
index 3a3046e47..de93c9988 100644
--- a/modules/k8s_state/kube_state.go
+++ b/modules/k8s_state/kube_state.go
@@ -5,6 +5,8 @@ package k8s_state
import (
"context"
_ "embed"
+ "errors"
+ "fmt"
"sync"
"time"
@@ -37,41 +39,48 @@ func New() *KubeState {
}
}
-type (
- discoverer interface {
- run(ctx context.Context, in chan<- resource)
- ready() bool
- stopped() bool
- }
+type Config struct {
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+type (
KubeState struct {
module.Base
-
- newKubeClient func() (kubernetes.Interface, error)
-
- startTime time.Time
- initDelay time.Duration
+ Config `yaml:",inline" json:""`
charts *module.Charts
- client kubernetes.Interface
- once *sync.Once
- wg *sync.WaitGroup
- discoverer discoverer
- ctx context.Context
- ctxCancel context.CancelFunc
- state *kubeState
+ client kubernetes.Interface
+ newKubeClient func() (kubernetes.Interface, error)
+ startTime time.Time
+ initDelay time.Duration
+ once *sync.Once
+ wg *sync.WaitGroup
+ discoverer discoverer
+ ctx context.Context
+ ctxCancel context.CancelFunc
kubeClusterID string
kubeClusterName string
+
+ state *kubeState
+ }
+ discoverer interface {
+ run(ctx context.Context, in chan<- resource)
+ ready() bool
+ stopped() bool
}
)
-func (ks *KubeState) Init() bool {
+func (ks *KubeState) Configuration() any {
+ return ks.Config
+}
+
+func (ks *KubeState) Init() error {
client, err := ks.initClient()
if err != nil {
ks.Errorf("client initialization: %v", err)
- return false
+ return err
}
ks.client = client
@@ -79,23 +88,25 @@ func (ks *KubeState) Init() bool {
ks.discoverer = ks.initDiscoverer(ks.client)
- return true
+ return nil
}
-func (ks *KubeState) Check() bool {
+func (ks *KubeState) Check() error {
if ks.client == nil || ks.discoverer == nil {
ks.Error("not initialized job")
- return false
+ return errors.New("not initialized")
}
ver, err := ks.client.Discovery().ServerVersion()
if err != nil {
- ks.Errorf("failed to connect to the Kubernetes API server: %v", err)
- return false
+ err := fmt.Errorf("failed to connect to K8s API server: %v", err)
+ ks.Error(err)
+ return err
}
ks.Infof("successfully connected to the Kubernetes API server '%s'", ver)
- return true
+
+ return nil
}
func (ks *KubeState) Charts() *module.Charts {
@@ -123,7 +134,7 @@ func (ks *KubeState) Cleanup() {
c := make(chan struct{})
go func() { defer close(c); ks.wg.Wait() }()
- t := time.NewTimer(time.Second * 3)
+ t := time.NewTimer(time.Second * 5)
defer t.Stop()
select {
diff --git a/modules/k8s_state/kube_state_test.go b/modules/k8s_state/kube_state_test.go
index 451028532..9cf4f0cc4 100644
--- a/modules/k8s_state/kube_state_test.go
+++ b/modules/k8s_state/kube_state_test.go
@@ -6,6 +6,7 @@ import (
"context"
"errors"
"fmt"
+ "os"
"strings"
"testing"
"time"
@@ -23,8 +24,22 @@ import (
"k8s.io/client-go/kubernetes/fake"
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestKubeState_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &KubeState{}, dataConfigJSON, dataConfigYAML)
}
func TestKubeState_Init(t *testing.T) {
@@ -55,9 +70,9 @@ func TestKubeState_Init(t *testing.T) {
ks := test.prepare()
if test.wantFail {
- assert.False(t, ks.Init())
+ assert.Error(t, ks.Init())
} else {
- assert.True(t, ks.Init())
+ assert.NoError(t, ks.Init())
}
})
}
@@ -90,12 +105,12 @@ func TestKubeState_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
ks := test.prepare()
- require.True(t, ks.Init())
+ require.NoError(t, ks.Init())
if test.wantFail {
- assert.False(t, ks.Check())
+ assert.Error(t, ks.Check())
} else {
- assert.True(t, ks.Check())
+ assert.NoError(t, ks.Check())
}
})
}
@@ -663,8 +678,8 @@ func TestKubeState_Collect(t *testing.T) {
ks := New()
ks.newKubeClient = func() (kubernetes.Interface, error) { return test.client, nil }
- require.True(t, ks.Init())
- require.True(t, ks.Check())
+ require.NoError(t, ks.Init())
+ require.NoError(t, ks.Check())
defer ks.Cleanup()
for i, executeStep := range test.steps {
diff --git a/modules/k8s_state/testdata/config.json b/modules/k8s_state/testdata/config.json
new file mode 100644
index 000000000..0e3f7c403
--- /dev/null
+++ b/modules/k8s_state/testdata/config.json
@@ -0,0 +1,3 @@
+{
+ "update_every": 123
+}
diff --git a/modules/k8s_state/testdata/config.yaml b/modules/k8s_state/testdata/config.yaml
new file mode 100644
index 000000000..f21a3a7a0
--- /dev/null
+++ b/modules/k8s_state/testdata/config.yaml
@@ -0,0 +1 @@
+update_every: 123
diff --git a/modules/lighttpd/config_schema.json b/modules/lighttpd/config_schema.json
index c1b51d065..7fc11274d 100644
--- a/modules/lighttpd/config_schema.json
+++ b/modules/lighttpd/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/lighttpd job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Lighttpd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Lighttpd server status page to monitor.",
+ "type": "string",
+ "default": "http://127.0.0.1/server-status?auto"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/lighttpd/init.go b/modules/lighttpd/init.go
new file mode 100644
index 000000000..f9f4baf37
--- /dev/null
+++ b/modules/lighttpd/init.go
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package lighttpd
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (l *Lighttpd) validateConfig() error {
+ if l.URL == "" {
+ return errors.New("url not set")
+ }
+ if !strings.HasSuffix(l.URL, "?auto") {
+ return fmt.Errorf("bad URL '%s', should ends in '?auto'", l.URL)
+ }
+ return nil
+}
+
+func (l *Lighttpd) initApiClient() (*apiClient, error) {
+ client, err := web.NewHTTPClient(l.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(client, l.Request), nil
+}
diff --git a/modules/lighttpd/lighttpd.go b/modules/lighttpd/lighttpd.go
index 2f98a96bf..19e3d4581 100644
--- a/modules/lighttpd/lighttpd.go
+++ b/modules/lighttpd/lighttpd.go
@@ -4,12 +4,11 @@ package lighttpd
import (
_ "embed"
- "strings"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -22,72 +21,70 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1/server-status?auto"
- defaultHTTPTimeout = time.Second * 2
-)
-
-// New creates Lighttpd with default values.
func New() *Lighttpd {
- config := Config{
+ return &Lighttpd{Config: Config{
HTTP: web.HTTP{
Request: web.Request{
- URL: defaultURL,
+ URL: "http://127.0.0.1/server-status?auto",
},
Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ Timeout: web.Duration(time.Second * 2),
},
},
- }
- return &Lighttpd{Config: config}
+ }}
}
-// Config is the Lighttpd module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Lighttpd struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
apiClient *apiClient
}
-// Cleanup makes cleanup.
-func (Lighttpd) Cleanup() {}
-
-// Init makes initialization.
-func (l *Lighttpd) Init() bool {
- if l.URL == "" {
- l.Error("URL not set")
- return false
- }
+func (l *Lighttpd) Configuration() any {
+ return l.Config
+}
- if !strings.HasSuffix(l.URL, "?auto") {
- l.Errorf("bad URL '%s', should ends in '?auto'", l.URL)
- return false
+func (l *Lighttpd) Init() error {
+ if err := l.validateConfig(); err != nil {
+ l.Errorf("config validation: %v", err)
+ return err
}
- client, err := web.NewHTTPClient(l.Client)
+ client, err := l.initApiClient()
if err != nil {
- l.Errorf("error on creating http client : %v", err)
- return false
+ l.Error(err)
+ return err
}
- l.apiClient = newAPIClient(client, l.Request)
+ l.apiClient = client
l.Debugf("using URL %s", l.URL)
- l.Debugf("using timeout: %s", l.Timeout.Duration)
+ l.Debugf("using timeout: %s", l.Timeout.Duration())
- return true
+ return nil
}
-// Check makes check
-func (l *Lighttpd) Check() bool { return len(l.Collect()) > 0 }
+func (l *Lighttpd) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
-// Charts returns Charts.
-func (l Lighttpd) Charts() *Charts { return charts.Copy() }
+func (l *Lighttpd) Charts() *Charts {
+ return charts.Copy()
+}
-// Collect collects metrics.
func (l *Lighttpd) Collect() map[string]int64 {
mx, err := l.collect()
@@ -98,3 +95,9 @@ func (l *Lighttpd) Collect() map[string]int64 {
return mx
}
+
+func (l *Lighttpd) Cleanup() {
+ if l.apiClient != nil && l.apiClient.httpClient != nil {
+ l.apiClient.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/modules/lighttpd/lighttpd_test.go b/modules/lighttpd/lighttpd_test.go
index e6a7b016e..36d0efa08 100644
--- a/modules/lighttpd/lighttpd_test.go
+++ b/modules/lighttpd/lighttpd_test.go
@@ -9,29 +9,38 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testStatusData, _ = os.ReadFile("testdata/status.txt")
- testApacheStatusData, _ = os.ReadFile("testdata/apache-status.txt")
-)
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() }
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
+ dataApacheStatusMetrics, _ = os.ReadFile("testdata/apache-status.txt")
+)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func TestLighttpd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Lighttpd{}, dataConfigJSON, dataConfigYAML)
}
+func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() }
+
func TestLighttpd_Init(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.NotNil(t, job.apiClient)
}
@@ -39,29 +48,29 @@ func TestLighttpd_InitNG(t *testing.T) {
job := New()
job.URL = ""
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestLighttpd_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/server-status?auto"
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestLighttpd_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/server-status?auto"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestLighttpd_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
@@ -70,14 +79,14 @@ func TestLighttpd_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/server-status?auto"
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"scoreboard_waiting": 125,
@@ -113,22 +122,22 @@ func TestLighttpd_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL + "/server-status?auto"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestLighttpd_ApacheData(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testApacheStatusData)
+ _, _ = w.Write(dataApacheStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/server-status?auto"
- require.True(t, job.Init())
- require.False(t, job.Check())
+ require.NoError(t, job.Init())
+ require.Error(t, job.Check())
}
func TestLighttpd_404(t *testing.T) {
@@ -141,6 +150,6 @@ func TestLighttpd_404(t *testing.T) {
job := New()
job.URL = ts.URL + "/server-status?auto"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/lighttpd/testdata/config.json b/modules/lighttpd/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/lighttpd/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/lighttpd/testdata/config.yaml b/modules/lighttpd/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/lighttpd/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/logind/config_schema.json b/modules/logind/config_schema.json
index b7ad53e9a..a1bb40720 100644
--- a/modules/logind/config_schema.json
+++ b/modules/logind/config_schema.json
@@ -1,19 +1,31 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/logind job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Logind configuration schema.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for a connection to systemds dbus endpoint.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name"
- ]
+ }
}
diff --git a/modules/logind/logind.go b/modules/logind/logind.go
index 456217e9f..1dfd92275 100644
--- a/modules/logind/logind.go
+++ b/modules/logind/logind.go
@@ -7,6 +7,7 @@ package logind
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -29,34 +30,48 @@ func init() {
func New() *Logind {
return &Logind{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second),
},
newLogindConn: func(cfg Config) (logindConnection, error) {
- return newLogindConnection(cfg.Timeout.Duration)
+ return newLogindConnection(cfg.Timeout.Duration())
},
charts: charts.Copy(),
}
}
type Config struct {
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type Logind struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
- newLogindConn func(config Config) (logindConnection, error)
conn logindConnection
- charts *module.Charts
+ newLogindConn func(config Config) (logindConnection, error)
}
-func (l *Logind) Init() bool {
- return true
+func (l *Logind) Configuration() any {
+ return l.Config
}
-func (l *Logind) Check() bool {
- return len(l.Collect()) > 0
+func (l *Logind) Init() error {
+ return nil
+}
+
+func (l *Logind) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (l *Logind) Charts() *module.Charts {
diff --git a/modules/logind/logind_test.go b/modules/logind/logind_test.go
index 07b00c168..cae5ba783 100644
--- a/modules/logind/logind_test.go
+++ b/modules/logind/logind_test.go
@@ -7,14 +7,35 @@ package logind
import (
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/coreos/go-systemd/v22/login1"
"github.com/godbus/dbus/v5"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestLogind_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Logind{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestLogind_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -32,9 +53,9 @@ func TestLogind_Init(t *testing.T) {
l.Config = test.config
if test.wantFail {
- assert.False(t, l.Init())
+ assert.Error(t, l.Init())
} else {
- assert.True(t, l.Init())
+ assert.NoError(t, l.Init())
}
})
}
@@ -55,15 +76,15 @@ func TestLogind_Cleanup(t *testing.T) {
},
"after Init": {
wantClose: false,
- prepare: func(l *Logind) { l.Init() },
+ prepare: func(l *Logind) { _ = l.Init() },
},
"after Check": {
wantClose: true,
- prepare: func(l *Logind) { l.Init(); l.Check() },
+ prepare: func(l *Logind) { _ = l.Init(); _ = l.Check() },
},
"after Collect": {
wantClose: true,
- prepare: func(l *Logind) { l.Init(); l.Collect() },
+ prepare: func(l *Logind) { _ = l.Init(); l.Collect() },
},
}
@@ -119,13 +140,13 @@ func TestLogind_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
l := New()
- require.True(t, l.Init())
+ require.NoError(t, l.Init())
l.conn = test.prepare()
if test.wantFail {
- assert.False(t, l.Check())
+ assert.Error(t, l.Check())
} else {
- assert.True(t, l.Check())
+ assert.NoError(t, l.Check())
}
})
}
@@ -193,7 +214,7 @@ func TestLogind_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
l := New()
- require.True(t, l.Init())
+ require.NoError(t, l.Init())
l.conn = test.prepare()
mx := l.Collect()
diff --git a/modules/logind/testdata/config.json b/modules/logind/testdata/config.json
new file mode 100644
index 000000000..291ecee3d
--- /dev/null
+++ b/modules/logind/testdata/config.json
@@ -0,0 +1,4 @@
+{
+ "update_every": 123,
+ "timeout": 123.123
+}
diff --git a/modules/logind/testdata/config.yaml b/modules/logind/testdata/config.yaml
new file mode 100644
index 000000000..25b0b4c78
--- /dev/null
+++ b/modules/logind/testdata/config.yaml
@@ -0,0 +1,2 @@
+update_every: 123
+timeout: 123.123
diff --git a/modules/logstash/config_schema.json b/modules/logstash/config_schema.json
index 9e4d59642..774b774d1 100644
--- a/modules/logstash/config_schema.json
+++ b/modules/logstash/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/logstash job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Logstash collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Logstash monitoring API.",
+ "type": "string",
+ "default": "http://localhost:9600"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/logstash/logstash.go b/modules/logstash/logstash.go
index 728267294..e32bccfbf 100644
--- a/modules/logstash/logstash.go
+++ b/modules/logstash/logstash.go
@@ -4,6 +4,7 @@ package logstash
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -29,7 +30,7 @@ func New() *Logstash {
URL: "http://localhost:9600",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -39,37 +40,54 @@ func New() *Logstash {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Logstash struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
httpClient *http.Client
- charts *module.Charts
- pipelines map[string]bool
+
+ pipelines map[string]bool
}
-func (l *Logstash) Init() bool {
+func (l *Logstash) Configuration() any {
+ return l.Config
+}
+
+func (l *Logstash) Init() error {
if l.URL == "" {
l.Error("config validation: 'url' cannot be empty")
- return false
+ return errors.New("url not set")
}
httpClient, err := web.NewHTTPClient(l.Client)
if err != nil {
l.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
l.httpClient = httpClient
l.Debugf("using URL %s", l.URL)
- l.Debugf("using timeout: %s", l.Timeout.Duration)
- return true
+ l.Debugf("using timeout: %s", l.Timeout.Duration())
+
+ return nil
}
-func (l *Logstash) Check() bool {
- return len(l.Collect()) > 0
+func (l *Logstash) Check() error {
+ mx, err := l.collect()
+ if err != nil {
+ l.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (l *Logstash) Charts() *module.Charts {
diff --git a/modules/logstash/logstash_test.go b/modules/logstash/logstash_test.go
index 2b5fd32d5..ac7d1d3d9 100644
--- a/modules/logstash/logstash_test.go
+++ b/modules/logstash/logstash_test.go
@@ -3,6 +3,7 @@
package logstash
import (
+ "github.com/netdata/go.d.plugin/agent/module"
"net/http"
"net/http/httptest"
"os"
@@ -15,18 +16,27 @@ import (
)
var (
- nodeStataData, _ = os.ReadFile("testdata/stats.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNodeStatsMetrics, _ = os.ReadFile("testdata/stats.json")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "nodeStataData": nodeStataData,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNodeStatsMetrics": dataNodeStatsMetrics,
} {
require.NotNilf(t, data, name)
}
}
+func TestLogstash_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Logstash{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestLogstash_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -52,9 +62,9 @@ func TestLogstash_Init(t *testing.T) {
ls.Config = test.config
if test.wantFail {
- assert.False(t, ls.Init())
+ assert.Error(t, ls.Init())
} else {
- assert.True(t, ls.Init())
+ assert.NoError(t, ls.Init())
}
})
}
@@ -97,9 +107,9 @@ func TestLogstash_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, ls.Check())
+ assert.Error(t, ls.Check())
} else {
- assert.True(t, ls.Check())
+ assert.NoError(t, ls.Check())
}
})
}
@@ -195,14 +205,14 @@ func caseValidResponse(t *testing.T) (*Logstash, func()) {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathNodeStatsAPI:
- _, _ = w.Write(nodeStataData)
+ _, _ = w.Write(dataNodeStatsMetrics)
default:
w.WriteHeader(http.StatusNotFound)
}
}))
ls := New()
ls.URL = srv.URL
- require.True(t, ls.Init())
+ require.NoError(t, ls.Init())
return ls, srv.Close
}
@@ -215,7 +225,7 @@ func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) {
}))
ls := New()
ls.URL = srv.URL
- require.True(t, ls.Init())
+ require.NoError(t, ls.Init())
return ls, srv.Close
}
@@ -224,7 +234,7 @@ func caseConnectionRefused(t *testing.T) (*Logstash, func()) {
t.Helper()
ls := New()
ls.URL = "http://127.0.0.1:65001"
- require.True(t, ls.Init())
+ require.NoError(t, ls.Init())
return ls, func() {}
}
@@ -237,7 +247,7 @@ func case404(t *testing.T) (*Logstash, func()) {
}))
ls := New()
ls.URL = srv.URL
- require.True(t, ls.Init())
+ require.NoError(t, ls.Init())
return ls, srv.Close
}
diff --git a/modules/logstash/testdata/config.json b/modules/logstash/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/logstash/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/logstash/testdata/config.yaml b/modules/logstash/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/logstash/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/mongodb/collect.go b/modules/mongodb/collect.go
index a050d217f..232145de3 100644
--- a/modules/mongodb/collect.go
+++ b/modules/mongodb/collect.go
@@ -5,7 +5,7 @@ package mongo
import "fmt"
func (m *Mongo) collect() (map[string]int64, error) {
- if err := m.conn.initClient(m.URI, m.Timeout); err != nil {
+ if err := m.conn.initClient(m.URI, m.Timeout.Duration()); err != nil {
return nil, fmt.Errorf("init mongo conn: %v", err)
}
diff --git a/modules/mongodb/config_schema.json b/modules/mongodb/config_schema.json
index 48afef584..855cb15f4 100644
--- a/modules/mongodb/config_schema.json
+++ b/modules/mongodb/config_schema.json
@@ -1,23 +1,86 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/mongodb job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MongoDB collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "uri": {
+ "title": "URI",
+ "description": "MongoDB connection string.",
+ "type": "string",
+ "default": "mongodb://localhost:27017"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Query timeout in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "databases": {
+ "title": "Database selector",
+ "description": "Configuration for monitoring specific databases. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 or exclude2). Patterns follow the syntax of matcher patterns.",
+ "type": "object",
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include databases that match any of the specified include patterns.",
+ "type": "array",
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude databases that match any of the specified exclude patterns.",
+ "type": "array",
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
},
- "uri": {
- "type": "string"
+ "required": [
+ "uri"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": "number"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "databases": {
- "type": "string"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "uri",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Database stats",
+ "fields": [
+ "databases"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "uri"
- ]
+ }
}
diff --git a/modules/mongodb/metadata.yaml b/modules/mongodb/metadata.yaml
index 20630e6ab..51f9a71d6 100644
--- a/modules/mongodb/metadata.yaml
+++ b/modules/mongodb/metadata.yaml
@@ -94,7 +94,7 @@ modules:
required: true
- name: timeout
description: Query timeout in seconds.
- default_value: 2
+ default_value: 1
required: false
- name: databases
description: Databases selector. Determines which database metrics will be collected.
diff --git a/modules/mongodb/mongodb.go b/modules/mongodb/mongodb.go
index 522acbaa0..58ba0895a 100644
--- a/modules/mongodb/mongodb.go
+++ b/modules/mongodb/mongodb.go
@@ -4,11 +4,13 @@ package mongo
import (
_ "embed"
+ "errors"
"sync"
"time"
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -24,8 +26,8 @@ func init() {
func New() *Mongo {
return &Mongo{
Config: Config{
- Timeout: 2,
URI: "mongodb://localhost:27017",
+ Timeout: web.Duration(time.Second),
Databases: matcher.SimpleExpr{
Includes: []string{},
Excludes: []string{},
@@ -45,45 +47,55 @@ func New() *Mongo {
}
type Config struct {
- URI string `yaml:"uri"`
- Timeout time.Duration `yaml:"timeout"`
- Databases matcher.SimpleExpr `yaml:"databases"`
+ URI string `yaml:"uri" json:"uri"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Databases matcher.SimpleExpr `yaml:"databases" json:"databases"`
}
type Mongo struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- charts *module.Charts
+ charts *module.Charts
+ addShardingChartsOnce *sync.Once
conn mongoConn
- dbSelector matcher.Matcher
-
- addShardingChartsOnce *sync.Once
-
+ dbSelector matcher.Matcher
optionalCharts map[string]bool
databases map[string]bool
replSetMembers map[string]bool
shards map[string]bool
}
-func (m *Mongo) Init() bool {
+func (m *Mongo) Configuration() any {
+ return m.Config
+}
+
+func (m *Mongo) Init() error {
if err := m.verifyConfig(); err != nil {
m.Errorf("config validation: %v", err)
- return false
+ return err
}
if err := m.initDatabaseSelector(); err != nil {
m.Errorf("init database selector: %v", err)
- return false
+ return err
}
- return true
+ return nil
}
-func (m *Mongo) Check() bool {
- return len(m.Collect()) > 0
+func (m *Mongo) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (m *Mongo) Charts() *module.Charts {
diff --git a/modules/mongodb/mongodb_test.go b/modules/mongodb/mongodb_test.go
index 37da851ed..04d7d9c0a 100644
--- a/modules/mongodb/mongodb_test.go
+++ b/modules/mongodb/mongodb_test.go
@@ -9,30 +9,40 @@ import (
"testing"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/netdata/go.d.plugin/pkg/matcher"
)
var (
- dataV6MongodServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongod-serverStatus.json")
- dataV6MongosServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongos-serverStatus.json")
- dataV6DbStats, _ = os.ReadFile("testdata/v6.0.3/dbStats.json")
- dataV6ReplSetGetStatus, _ = os.ReadFile("testdata/v6.0.3/replSetGetStatus.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer6MongodServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongod-serverStatus.json")
+ dataVer6MongosServerStatus, _ = os.ReadFile("testdata/v6.0.3/mongos-serverStatus.json")
+ dataVer6DbStats, _ = os.ReadFile("testdata/v6.0.3/dbStats.json")
+ dataVer6ReplSetGetStatus, _ = os.ReadFile("testdata/v6.0.3/replSetGetStatus.json")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataV6MongodServerStatus": dataV6MongodServerStatus,
- "dataV6MongosServerStatus": dataV6MongosServerStatus,
- "dataV6DbStats": dataV6DbStats,
- "dataV6ReplSetGetStatus": dataV6ReplSetGetStatus,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer6MongodServerStatus": dataVer6MongodServerStatus,
+ "dataVer6MongosServerStatus": dataVer6MongosServerStatus,
+ "dataVer6DbStats": dataVer6DbStats,
+ "dataVer6ReplSetGetStatus": dataVer6ReplSetGetStatus,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestMongo_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Mongo{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestMongo_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -65,9 +75,9 @@ func TestMongo_Init(t *testing.T) {
mongo.Config = test.config
if test.wantFail {
- assert.False(t, mongo.Init())
+ assert.Error(t, mongo.Init())
} else {
- assert.True(t, mongo.Init())
+ assert.NoError(t, mongo.Init())
}
})
}
@@ -139,12 +149,12 @@ func TestMongo_Check(t *testing.T) {
defer mongo.Cleanup()
mongo.conn = test.prepare()
- require.True(t, mongo.Init())
+ require.NoError(t, mongo.Init())
if test.wantFail {
- assert.False(t, mongo.Check())
+ assert.Error(t, mongo.Check())
} else {
- assert.True(t, mongo.Check())
+ assert.NoError(t, mongo.Check())
}
})
}
@@ -590,7 +600,7 @@ func TestMongo_Collect(t *testing.T) {
defer mongo.Cleanup()
mongo.conn = test.prepare()
- require.True(t, mongo.Init())
+ require.NoError(t, mongo.Init())
mx := mongo.Collect()
@@ -641,9 +651,9 @@ func (m *mockMongoClient) serverStatus() (*documentServerStatus, error) {
return nil, errors.New("mock.serverStatus() error")
}
- data := dataV6MongodServerStatus
+ data := dataVer6MongodServerStatus
if m.mongos {
- data = dataV6MongosServerStatus
+ data = dataVer6MongosServerStatus
}
var s documentServerStatus
@@ -673,7 +683,7 @@ func (m *mockMongoClient) dbStats(_ string) (*documentDBStats, error) {
}
var s documentDBStats
- if err := json.Unmarshal(dataV6DbStats, &s); err != nil {
+ if err := json.Unmarshal(dataVer6DbStats, &s); err != nil {
return nil, err
}
@@ -703,7 +713,7 @@ func (m *mockMongoClient) replSetGetStatus() (*documentReplSetStatus, error) {
}
var s documentReplSetStatus
- if err := json.Unmarshal(dataV6ReplSetGetStatus, &s); err != nil {
+ if err := json.Unmarshal(dataVer6ReplSetGetStatus, &s); err != nil {
return nil, err
}
diff --git a/modules/mongodb/testdata/config.json b/modules/mongodb/testdata/config.json
new file mode 100644
index 000000000..2c2f63e68
--- /dev/null
+++ b/modules/mongodb/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "uri": "ok",
+ "timeout": 123.123,
+ "databases": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/mongodb/testdata/config.yaml b/modules/mongodb/testdata/config.yaml
new file mode 100644
index 000000000..53529ea98
--- /dev/null
+++ b/modules/mongodb/testdata/config.yaml
@@ -0,0 +1,7 @@
+uri: "ok"
+timeout: 123.123
+databases:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/modules/mysql/collect.go b/modules/mysql/collect.go
index 3ff0882ad..796ca22ff 100644
--- a/modules/mysql/collect.go
+++ b/modules/mysql/collect.go
@@ -97,7 +97,7 @@ func (m *MySQL) openConnection() error {
db.SetConnMaxLifetime(10 * time.Minute)
- ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration())
defer cancel()
if err := db.PingContext(ctx); err != nil {
@@ -145,7 +145,7 @@ func hasTableOpenCacheOverflowsMetrics(collected map[string]int64) bool {
}
func (m *MySQL) collectQuery(query string, assign func(column, value string, lineEnd bool)) (duration int64, err error) {
- ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration())
defer cancel()
s := time.Now()
diff --git a/modules/mysql/config_schema.json b/modules/mysql/config_schema.json
index 1db919824..1e255b19c 100644
--- a/modules/mysql/config_schema.json
+++ b/modules/mysql/config_schema.json
@@ -1,29 +1,42 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/mysql job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "dsn": {
- "type": "string"
- },
- "my.cnf": {
- "type": "string"
- },
- "update_every": {
- "type": "integer"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "MySQL collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the MySQL server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "MySQL server Data Source Name (DSN) specifying the connection details.",
+ "type": "string",
+ "default": "netdata@tcp(localhost:3306)/"
+ },
+ "my.cnf": {
+ "title": "my.cnf path",
+ "description": "Specifies the path to the my.cnf file containing connection settings under the [client] section.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for queries executed against the MySQL server.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "dsn"
- ]
+ }
}
diff --git a/modules/mysql/mysql.go b/modules/mysql/mysql.go
index c7016098f..613d7d410 100644
--- a/modules/mysql/mysql.go
+++ b/modules/mysql/mysql.go
@@ -5,16 +5,17 @@ package mysql
import (
"database/sql"
_ "embed"
+ "errors"
"strings"
"sync"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
+
"github.com/blang/semver/v4"
"github.com/go-sql-driver/mysql"
_ "github.com/go-sql-driver/mysql"
-
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -31,7 +32,7 @@ func New() *MySQL {
return &MySQL{
Config: Config{
DSN: "root@tcp(localhost:3306)/",
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
charts: baseCharts.Copy(),
@@ -52,24 +53,17 @@ func New() *MySQL {
}
type Config struct {
- DSN string `yaml:"dsn"`
- MyCNF string `yaml:"my.cnf"`
- UpdateEvery int `yaml:"update_every"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ MyCNF string `yaml:"my.cnf" json:"my.cnf"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type MySQL struct {
module.Base
- Config `yaml:",inline"`
-
- db *sql.DB
- safeDSN string
- version *semver.Version
- isMariaDB bool
- isPercona bool
-
- charts *module.Charts
+ Config `yaml:",inline" json:""`
+ charts *module.Charts
addInnoDBOSLogOnce *sync.Once
addBinlogOnce *sync.Once
addMyISAMOnce *sync.Once
@@ -78,6 +72,13 @@ type MySQL struct {
addQCacheOnce *sync.Once
addTableOpenCacheOverflowsOnce *sync.Once
+ db *sql.DB
+
+ safeDSN string
+ version *semver.Version
+ isMariaDB bool
+ isPercona bool
+
doSlaveStatus bool
collectedReplConns map[string]bool
doUserStatistics bool
@@ -92,36 +93,49 @@ type MySQL struct {
varPerformanceSchema string
}
-func (m *MySQL) Init() bool {
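+// Configuration returns the MySQL collector configuration.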
+func (m *MySQL) Configuration() any {
+ return m.Config
+}
+
+func (m *MySQL) Init() error {
if m.MyCNF != "" {
dsn, err := dsnFromFile(m.MyCNF)
if err != nil {
m.Error(err)
- return false
+ return err
}
m.DSN = dsn
}
if m.DSN == "" {
- m.Error("DSN not set")
- return false
+ m.Error("dsn not set")
+ return errors.New("dsn not set")
}
cfg, err := mysql.ParseDSN(m.DSN)
if err != nil {
m.Errorf("error on parsing DSN: %v", err)
- return false
+ return err
}
cfg.Passwd = strings.Repeat("*", len(cfg.Passwd))
m.safeDSN = cfg.FormatDSN()
m.Debugf("using DSN [%s]", m.DSN)
- return true
+
+ return nil
}
-func (m *MySQL) Check() bool {
- return len(m.Collect()) > 0
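+// Check performs a single collection attempt; it fails on error or when no metrics are returned.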
+func (m *MySQL) Check() error {
+ mx, err := m.collect()
+ if err != nil {
+ m.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (m *MySQL) Charts() *module.Charts {
diff --git a/modules/mysql/mysql_test.go b/modules/mysql/mysql_test.go
index 283b13770..c3376ec78 100644
--- a/modules/mysql/mysql_test.go
+++ b/modules/mysql/mysql_test.go
@@ -21,79 +21,80 @@ import (
)
var (
- dataMySQLV8030Version, _ = os.ReadFile("testdata/mysql/v8.0.30/version.txt")
- dataMySQLV8030GlobalStatus, _ = os.ReadFile("testdata/mysql/v8.0.30/global_status.txt")
- dataMySQLV8030GlobalVariables, _ = os.ReadFile("testdata/mysql/v8.0.30/global_variables.txt")
- dataMySQLV8030ReplicaStatusMultiSource, _ = os.ReadFile("testdata/mysql/v8.0.30/replica_status_multi_source.txt")
- dataMySQLV8030ProcessList, _ = os.ReadFile("testdata/mysql/v8.0.30/process_list.txt")
-
- dataPerconaV8029Version, _ = os.ReadFile("testdata/percona/v8.0.29/version.txt")
- dataPerconaV8029GlobalStatus, _ = os.ReadFile("testdata/percona/v8.0.29/global_status.txt")
- dataPerconaV8029GlobalVariables, _ = os.ReadFile("testdata/percona/v8.0.29/global_variables.txt")
- dataPerconaV8029UserStatistics, _ = os.ReadFile("testdata/percona/v8.0.29/user_statistics.txt")
- dataPerconaV8029ProcessList, _ = os.ReadFile("testdata/percona/v8.0.29/process_list.txt")
-
- dataMariaV5564Version, _ = os.ReadFile("testdata/mariadb/v5.5.64/version.txt")
- dataMariaV5564GlobalStatus, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_status.txt")
- dataMariaV5564GlobalVariables, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_variables.txt")
- dataMariaV5564ProcessList, _ = os.ReadFile("testdata/mariadb/v5.5.64/process_list.txt")
-
- dataMariaV1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4/version.txt")
- dataMariaV1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_status.txt")
- dataMariaV1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_variables.txt")
- dataMariaV1084AllSlavesStatusSingleSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt")
- dataMariaV1084AllSlavesStatusMultiSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt")
- dataMariaV1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4/user_statistics.txt")
- dataMariaV1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4/process_list.txt")
-
- dataMariaGaleraClusterV1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/version.txt")
- dataMariaGaleraClusterV1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_status.txt")
- dataMariaGaleraClusterV1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt")
- dataMariaGaleraClusterV1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt")
- dataMariaGaleraClusterV1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/process_list.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMySQLVer8030Version, _ = os.ReadFile("testdata/mysql/v8.0.30/version.txt")
+ dataMySQLVer8030GlobalStatus, _ = os.ReadFile("testdata/mysql/v8.0.30/global_status.txt")
+ dataMySQLVer8030GlobalVariables, _ = os.ReadFile("testdata/mysql/v8.0.30/global_variables.txt")
+ dataMySQLVer8030ReplicaStatusMultiSource, _ = os.ReadFile("testdata/mysql/v8.0.30/replica_status_multi_source.txt")
+ dataMySQLVer8030ProcessList, _ = os.ReadFile("testdata/mysql/v8.0.30/process_list.txt")
+
+ dataPerconaVer8029Version, _ = os.ReadFile("testdata/percona/v8.0.29/version.txt")
+ dataPerconaVer8029GlobalStatus, _ = os.ReadFile("testdata/percona/v8.0.29/global_status.txt")
+ dataPerconaVer8029GlobalVariables, _ = os.ReadFile("testdata/percona/v8.0.29/global_variables.txt")
+ dataPerconaVer8029UserStatistics, _ = os.ReadFile("testdata/percona/v8.0.29/user_statistics.txt")
+	dataPerconaVer8029ProcessList, _     = os.ReadFile("testdata/percona/v8.0.29/process_list.txt")
+
+ dataMariaVer5564Version, _ = os.ReadFile("testdata/mariadb/v5.5.64/version.txt")
+ dataMariaVer5564GlobalStatus, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_status.txt")
+ dataMariaVer5564GlobalVariables, _ = os.ReadFile("testdata/mariadb/v5.5.64/global_variables.txt")
+ dataMariaVer5564ProcessList, _ = os.ReadFile("testdata/mariadb/v5.5.64/process_list.txt")
+
+ dataMariaVer1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4/version.txt")
+ dataMariaVer1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_status.txt")
+ dataMariaVer1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4/global_variables.txt")
+ dataMariaVer1084AllSlavesStatusSingleSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_single_source.txt")
+ dataMariaVer1084AllSlavesStatusMultiSource, _ = os.ReadFile("testdata/mariadb/v10.8.4/all_slaves_status_multi_source.txt")
+ dataMariaVer1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4/user_statistics.txt")
+ dataMariaVer1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4/process_list.txt")
+
+ dataMariaGaleraClusterVer1084Version, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/version.txt")
+ dataMariaGaleraClusterVer1084GlobalStatus, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_status.txt")
+ dataMariaGaleraClusterVer1084GlobalVariables, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/global_variables.txt")
+ dataMariaGaleraClusterVer1084UserStatistics, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/user_statistics.txt")
+ dataMariaGaleraClusterVer1084ProcessList, _ = os.ReadFile("testdata/mariadb/v10.8.4-galera-cluster/process_list.txt")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataMySQLV8030Version": dataMySQLV8030Version,
- "dataMySQLV8030GlobalStatus": dataMySQLV8030GlobalStatus,
- "dataMySQLV8030GlobalVariables": dataMySQLV8030GlobalVariables,
- "dataMySQLV8030ReplicaStatusMultiSource": dataMySQLV8030ReplicaStatusMultiSource,
- "dataMySQLV8030ProcessList": dataMySQLV8030ProcessList,
-
- "dataPerconaV8029Version": dataPerconaV8029Version,
- "dataPerconaV8029GlobalStatus": dataPerconaV8029GlobalStatus,
- "dataPerconaV8029GlobalVariables": dataPerconaV8029GlobalVariables,
- "dataPerconaV8029UserStatistics": dataPerconaV8029UserStatistics,
- "dataPerconaV8029ProcessList": dataPerconaV8029ProcessList,
-
- "dataMariaV5564Version": dataMariaV5564Version,
- "dataMariaV5564GlobalStatus": dataMariaV5564GlobalStatus,
- "dataMariaV5564GlobalVariables": dataMariaV5564GlobalVariables,
- "dataMariaV5564ProcessList": dataMariaV5564ProcessList,
-
- "dataMariaV1084Version": dataMariaV1084Version,
- "dataMariaV1084GlobalStatus": dataMariaV1084GlobalStatus,
- "dataMariaV1084GlobalVariables": dataMariaV1084GlobalVariables,
- "dataMariaV1084AllSlavesStatusSingleSource": dataMariaV1084AllSlavesStatusSingleSource,
- "dataMariaV1084AllSlavesStatusMultiSource": dataMariaV1084AllSlavesStatusMultiSource,
- "dataMariaV1084UserStatistics": dataMariaV1084UserStatistics,
- "dataMariaV1084ProcessList": dataMariaV1084ProcessList,
-
- "dataMariaGaleraClusterV1084Version": dataMariaGaleraClusterV1084Version,
- "dataMariaGaleraClusterV1084GlobalStatus": dataMariaGaleraClusterV1084GlobalStatus,
- "dataMariaGaleraClusterV1084GlobalVariables": dataMariaGaleraClusterV1084GlobalVariables,
- "dataMariaGaleraClusterV1084UserStatistics": dataMariaGaleraClusterV1084UserStatistics,
- "dataMariaGaleraClusterV1084ProcessList": dataMariaGaleraClusterV1084ProcessList,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMySQLVer8030Version": dataMySQLVer8030Version,
+ "dataMySQLVer8030GlobalStatus": dataMySQLVer8030GlobalStatus,
+ "dataMySQLVer8030GlobalVariables": dataMySQLVer8030GlobalVariables,
+ "dataMySQLVer8030ReplicaStatusMultiSource": dataMySQLVer8030ReplicaStatusMultiSource,
+ "dataMySQLVer8030ProcessList": dataMySQLVer8030ProcessList,
+ "dataPerconaVer8029Version": dataPerconaVer8029Version,
+ "dataPerconaVer8029GlobalStatus": dataPerconaVer8029GlobalStatus,
+ "dataPerconaVer8029GlobalVariables": dataPerconaVer8029GlobalVariables,
+ "dataPerconaVer8029UserStatistics": dataPerconaVer8029UserStatistics,
+		"dataPerconaVer8029ProcessList":                dataPerconaVer8029ProcessList,
+ "dataMariaVer5564Version": dataMariaVer5564Version,
+ "dataMariaVer5564GlobalStatus": dataMariaVer5564GlobalStatus,
+ "dataMariaVer5564GlobalVariables": dataMariaVer5564GlobalVariables,
+ "dataMariaVer5564ProcessList": dataMariaVer5564ProcessList,
+ "dataMariaVer1084Version": dataMariaVer1084Version,
+ "dataMariaVer1084GlobalStatus": dataMariaVer1084GlobalStatus,
+ "dataMariaVer1084GlobalVariables": dataMariaVer1084GlobalVariables,
+ "dataMariaVer1084AllSlavesStatusSingleSource": dataMariaVer1084AllSlavesStatusSingleSource,
+ "dataMariaVer1084AllSlavesStatusMultiSource": dataMariaVer1084AllSlavesStatusMultiSource,
+ "dataMariaVer1084UserStatistics": dataMariaVer1084UserStatistics,
+ "dataMariaVer1084ProcessList": dataMariaVer1084ProcessList,
+ "dataMariaGaleraClusterVer1084Version": dataMariaGaleraClusterVer1084Version,
+ "dataMariaGaleraClusterVer1084GlobalStatus": dataMariaGaleraClusterVer1084GlobalStatus,
+ "dataMariaGaleraClusterVer1084GlobalVariables": dataMariaGaleraClusterVer1084GlobalVariables,
+ "dataMariaGaleraClusterVer1084UserStatistics": dataMariaGaleraClusterVer1084UserStatistics,
+ "dataMariaGaleraClusterVer1084ProcessList": dataMariaGaleraClusterVer1084ProcessList,
} {
- require.NotNilf(t, data, fmt.Sprintf("read data: %s", name))
+ require.NotNil(t, data, fmt.Sprintf("read data: %s", name))
_, err := prepareMockRows(data)
- require.NoErrorf(t, err, fmt.Sprintf("prepare mock rows: %s", name))
+ require.NoError(t, err, fmt.Sprintf("prepare mock rows: %s", name))
}
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestMySQL_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &MySQL{}, dataConfigJSON, dataConfigYAML)
}
func TestMySQL_Init(t *testing.T) {
@@ -113,9 +114,9 @@ func TestMySQL_Init(t *testing.T) {
mySQL.Config = test.config
if test.wantFail {
- assert.False(t, mySQL.Init())
+ assert.Error(t, mySQL.Init())
} else {
- assert.True(t, mySQL.Init())
+ assert.NoError(t, mySQL.Init())
}
})
}
@@ -162,12 +163,12 @@ func TestMySQL_Check(t *testing.T) {
"success on all queries": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
- mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
},
"fails when error on querying version": {
@@ -179,47 +180,47 @@ func TestMySQL_Check(t *testing.T) {
"fails when error on querying global status": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
mockExpectErr(m, queryShowGlobalStatus)
},
},
"fails when error on querying global variables": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
-				mockExpectErr(m, queryShowGlobalStatus)
+				mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+				mockExpectErr(m, queryShowGlobalVariables)
},
},
"success when error on querying slave status": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
mockExpectErr(m, queryShowAllSlavesStatus)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
},
"success when error on querying user statistics": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
- mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
mockExpectErr(m, queryShowUserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
},
"success when error on querying process list": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
- mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
mockExpectErr(m, queryShowProcessList)
},
},
@@ -235,14 +236,14 @@ func TestMySQL_Check(t *testing.T) {
my.db = db
defer func() { _ = db.Close() }()
- require.True(t, my.Init())
+ require.NoError(t, my.Init())
test.prepareMock(t, mock)
if test.wantFail {
- assert.False(t, my.Check())
+ assert.Error(t, my.Check())
} else {
- assert.True(t, my.Check())
+ assert.NoError(t, my.Check())
}
assert.NoError(t, mock.ExpectationsWereMet())
})
@@ -258,11 +259,11 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-Standalone[v5.5.46]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV5564Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV5564GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV5564GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataMariaVer5564Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer5564GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer5564GlobalVariables)
mockExpect(t, m, queryShowSlaveStatus, nil)
- mockExpect(t, m, queryShowProcessList, dataMariaV5564ProcessList)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer5564ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -389,12 +390,12 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-Standalone[v10.8.4]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
mockExpect(t, m, queryShowAllSlavesStatus, nil)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -568,12 +569,12 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-SingleSourceReplication[v10.8.4]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
- mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusSingleSource)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusSingleSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -749,12 +750,12 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-MultiSourceReplication[v10.8.4]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
- mockExpect(t, m, queryShowAllSlavesStatus, dataMariaV1084AllSlavesStatusMultiSource)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
+ mockExpect(t, m, queryShowAllSlavesStatus, dataMariaVer1084AllSlavesStatusMultiSource)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -933,12 +934,12 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-MultiSourceReplication[v10.8.4]: error on slaves status (no permissions)": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaV1084GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataMariaVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaVer1084GlobalVariables)
mockExpectErr(m, queryShowAllSlavesStatus)
- mockExpect(t, m, queryShowUserStatistics, dataMariaV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaV1084ProcessList)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaVer1084ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -1111,12 +1112,12 @@ func TestMySQL_Collect(t *testing.T) {
"MariaDB-GaleraCluster[v10.8.4]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMariaGaleraClusterV1084Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMariaGaleraClusterV1084GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMariaGaleraClusterV1084GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataMariaGaleraClusterVer1084Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMariaGaleraClusterVer1084GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMariaGaleraClusterVer1084GlobalVariables)
mockExpect(t, m, queryShowAllSlavesStatus, nil)
- mockExpect(t, m, queryShowUserStatistics, dataMariaGaleraClusterV1084UserStatistics)
- mockExpect(t, m, queryShowProcessList, dataMariaGaleraClusterV1084ProcessList)
+ mockExpect(t, m, queryShowUserStatistics, dataMariaGaleraClusterVer1084UserStatistics)
+ mockExpect(t, m, queryShowProcessList, dataMariaGaleraClusterVer1084ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -1305,11 +1306,11 @@ func TestMySQL_Collect(t *testing.T) {
"MySQL-MultiSourceReplication[v8.0.30]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataMySQLV8030Version)
- mockExpect(t, m, queryShowGlobalStatus, dataMySQLV8030GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataMySQLV8030GlobalVariables)
- mockExpect(t, m, queryShowReplicaStatus, dataMySQLV8030ReplicaStatusMultiSource)
- mockExpect(t, m, queryShowProcessListPS, dataMySQLV8030ProcessList)
+ mockExpect(t, m, queryShowVersion, dataMySQLVer8030Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataMySQLVer8030GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataMySQLVer8030GlobalVariables)
+ mockExpect(t, m, queryShowReplicaStatus, dataMySQLVer8030ReplicaStatusMultiSource)
+ mockExpect(t, m, queryShowProcessListPS, dataMySQLVer8030ProcessList)
},
check: func(t *testing.T, my *MySQL) {
mx := my.Collect()
@@ -1440,11 +1441,11 @@ func TestMySQL_Collect(t *testing.T) {
"Percona-Standalone[v8.0.29]: success on all queries": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataPerconaV8029Version)
- mockExpect(t, m, queryShowGlobalStatus, dataPerconaV8029GlobalStatus)
- mockExpect(t, m, queryShowGlobalVariables, dataPerconaV8029GlobalVariables)
+ mockExpect(t, m, queryShowVersion, dataPerconaVer8029Version)
+ mockExpect(t, m, queryShowGlobalStatus, dataPerconaVer8029GlobalStatus)
+ mockExpect(t, m, queryShowGlobalVariables, dataPerconaVer8029GlobalVariables)
mockExpect(t, m, queryShowReplicaStatus, nil)
- mockExpect(t, m, queryShowUserStatistics, dataPerconaV8029UserStatistics)
+ mockExpect(t, m, queryShowUserStatistics, dataPerconaVer8029UserStatistics)
-			mockExpect(t, m, queryShowProcessListPS, dataPerconaV8029ProcessList)
+			mockExpect(t, m, queryShowProcessListPS, dataPerconaVer8029ProcessList)
},
check: func(t *testing.T, my *MySQL) {
@@ -1607,7 +1608,7 @@ func TestMySQL_Collect(t *testing.T) {
my.db = db
defer func() { _ = db.Close() }()
- require.True(t, my.Init())
+ require.NoError(t, my.Init())
for i, step := range test {
t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
diff --git a/modules/mysql/testdata/config.json b/modules/mysql/testdata/config.json
new file mode 100644
index 000000000..92a65cb5c
--- /dev/null
+++ b/modules/mysql/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "my.cnf": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/mysql/testdata/config.yaml b/modules/mysql/testdata/config.yaml
new file mode 100644
index 000000000..9bb474b94
--- /dev/null
+++ b/modules/mysql/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+dsn: "ok"
+my.cnf: "ok"
+timeout: 123.123
diff --git a/modules/nginx/config_schema.json b/modules/nginx/config_schema.json
index 58a6865da..34f23a666 100644
--- a/modules/nginx/config_schema.json
+++ b/modules/nginx/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/nginx job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "NGINX collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the NGINX status page to monitor.",
+ "type": "string",
+ "default": "http://127.0.0.1/stub_status"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/nginx/nginx.go b/modules/nginx/nginx.go
index 9acf1e72b..35662a00b 100644
--- a/modules/nginx/nginx.go
+++ b/modules/nginx/nginx.go
@@ -4,11 +4,11 @@ package nginx
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -21,74 +21,75 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1/stub_status"
- defaultHTTPTimeout = time.Second
-)
-
-// New creates Nginx with default values.
func New() *Nginx {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &Nginx{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/stub_status",
+ },
+ Client: web.Client{
+				Timeout: web.Duration(time.Second),
+ },
},
- },
- }
-
- return &Nginx{Config: config}
+ }}
}
-// Config is the Nginx module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
-// Nginx nginx module.
type Nginx struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
apiClient *apiClient
}
-// Cleanup makes cleanup.
-func (Nginx) Cleanup() {}
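+// Configuration returns the Nginx collector configuration.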
+func (n *Nginx) Configuration() any {
+ return n.Config
+}
-// Init makes initialization.
-func (n *Nginx) Init() bool {
+func (n *Nginx) Init() error {
if n.URL == "" {
n.Error("URL not set")
- return false
+ return errors.New("url not set")
}
client, err := web.NewHTTPClient(n.Client)
if err != nil {
n.Error(err)
- return false
+ return err
}
n.apiClient = newAPIClient(client, n.Request)
n.Debugf("using URL %s", n.URL)
- n.Debugf("using timeout: %s", n.Timeout.Duration)
+ n.Debugf("using timeout: %s", n.Timeout)
- return true
+ return nil
}
-// Check makes check.
-func (n *Nginx) Check() bool { return len(n.Collect()) > 0 }
+func (n *Nginx) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
+}
-// Charts creates Charts.
-func (Nginx) Charts() *Charts { return charts.Copy() }
+func (n *Nginx) Charts() *Charts {
+ return charts.Copy()
+}
-// Collect collects metrics.
func (n *Nginx) Collect() map[string]int64 {
mx, err := n.collect()
-
if err != nil {
n.Error(err)
return nil
@@ -96,3 +97,9 @@ func (n *Nginx) Collect() map[string]int64 {
return mx
}
+
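+// Cleanup closes idle connections held by the underlying HTTP client.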
+func (n *Nginx) Cleanup() {
+ if n.apiClient != nil && n.apiClient.httpClient != nil {
+ n.apiClient.httpClient.CloseIdleConnections()
+ }
+}
diff --git a/modules/nginx/nginx_test.go b/modules/nginx/nginx_test.go
index ef115482e..d25e2d5af 100644
--- a/modules/nginx/nginx_test.go
+++ b/modules/nginx/nginx_test.go
@@ -9,29 +9,42 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testStatusData, _ = os.ReadFile("testdata/status.txt")
- testTengineStatusData, _ = os.ReadFile("testdata/tengine-status.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
+ dataTengineStatusMetrics, _ = os.ReadFile("testdata/tengine-status.txt")
)
-func TestNginx_Cleanup(t *testing.T) { New().Cleanup() }
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusMetrics": dataStatusMetrics,
+ "dataTengineStatusMetrics": dataTengineStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
-func TestNew(t *testing.T) {
- job := New()
+func TestNginx_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Nginx{}, dataConfigJSON, dataConfigYAML)
+}
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func TestNginx_Cleanup(t *testing.T) {
+ New().Cleanup()
}
func TestNginx_Init(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.NotNil(t, job.apiClient)
}
@@ -39,38 +52,40 @@ func TestNginx_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestNginx_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/us"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
-func TestNginx_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
+func TestNginx_Charts(t *testing.T) {
+ assert.NotNil(t, New().Charts())
+}
func TestNginx_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"accepts": 36,
@@ -89,14 +104,14 @@ func TestNginx_CollectTengine(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testTengineStatusData)
+ _, _ = w.Write(dataTengineStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"accepts": 1140,
@@ -122,8 +137,8 @@ func TestNginx_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestNginx_404(t *testing.T) {
@@ -136,6 +151,6 @@ func TestNginx_404(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/nginx/testdata/config.json b/modules/nginx/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/nginx/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/nginx/testdata/config.yaml b/modules/nginx/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/nginx/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/nginxplus/config_schema.json b/modules/nginxplus/config_schema.json
index c1457d2d7..619e43a24 100644
--- a/modules/nginxplus/config_schema.json
+++ b/modules/nginxplus/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/nginxplus job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "NGINX Plus collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the NGINX Plus webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/nginxplus/nginxplus.go b/modules/nginxplus/nginxplus.go
index ba82242f8..377433a67 100644
--- a/modules/nginxplus/nginxplus.go
+++ b/modules/nginxplus/nginxplus.go
@@ -4,6 +4,7 @@ package nginxplus
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -29,7 +30,7 @@ func New() *NginxPlus {
URL: "http://127.0.0.1",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 1},
+				Timeout: web.Duration(time.Second),
},
},
},
@@ -40,20 +41,20 @@ func New() *NginxPlus {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type NginxPlus struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
httpClient *http.Client
apiVersion int64
-
- endpoints struct {
+ endpoints struct {
nginx bool
connections bool
ssl bool
@@ -68,28 +69,39 @@ type NginxPlus struct {
}
queryEndpointsTime time.Time
queryEndpointsEvery time.Duration
+ cache *cache
+}
- cache *cache
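+// Configuration returns the NginxPlus collector configuration.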
+func (n *NginxPlus) Configuration() any {
+ return n.Config
}
-func (n *NginxPlus) Init() bool {
+func (n *NginxPlus) Init() error {
if n.URL == "" {
n.Error("config validation: 'url' can not be empty'")
- return false
+ return errors.New("url not set")
}
client, err := web.NewHTTPClient(n.Client)
if err != nil {
n.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
n.httpClient = client
- return true
+ return nil
}
-func (n *NginxPlus) Check() bool {
- return len(n.Collect()) > 0
+func (n *NginxPlus) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (n *NginxPlus) Charts() *module.Charts {
diff --git a/modules/nginxplus/nginxplus_test.go b/modules/nginxplus/nginxplus_test.go
index 7bbe89557..bd68ecac0 100644
--- a/modules/nginxplus/nginxplus_test.go
+++ b/modules/nginxplus/nginxplus_test.go
@@ -9,6 +9,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
"github.com/stretchr/testify/assert"
@@ -16,6 +17,9 @@ import (
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataAPI8APIVersions, _ = os.ReadFile("testdata/api-8/api_versions.json")
dataAPI8Connections, _ = os.ReadFile("testdata/api-8/connections.json")
dataAPI8EndpointsHTTP, _ = os.ReadFile("testdata/api-8/endpoints_http.json")
@@ -35,6 +39,8 @@ var (
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
"dataAPI8APIVersions": dataAPI8APIVersions,
"dataAPI8Connections": dataAPI8Connections,
"dataAPI8EndpointsHTTP": dataAPI8EndpointsHTTP,
@@ -51,10 +57,14 @@ func Test_testDataIsValid(t *testing.T) {
"dataAPI8Resolvers": dataAPI8Resolvers,
"data404": data404,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestNginxPlus_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NginxPlus{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNginxPlus_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -80,9 +90,9 @@ func TestNginxPlus_Init(t *testing.T) {
nginx.Config = test.config
if test.wantFail {
- assert.False(t, nginx.Init())
+ assert.Error(t, nginx.Init())
} else {
- assert.True(t, nginx.Init())
+ assert.NoError(t, nginx.Init())
}
})
}
@@ -117,9 +127,9 @@ func TestNginxPlus_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, nginx.Check())
+ assert.Error(t, nginx.Check())
} else {
- assert.True(t, nginx.Check())
+ assert.NoError(t, nginx.Check())
}
})
}
@@ -500,7 +510,7 @@ func caseAPI8AllRequestsOK(t *testing.T) (*NginxPlus, func()) {
}))
nginx := New()
nginx.URL = srv.URL
- require.True(t, nginx.Init())
+ require.NoError(t, nginx.Init())
return nginx, srv.Close
}
@@ -542,7 +552,7 @@ func caseAPI8AllRequestsExceptStreamOK(t *testing.T) (*NginxPlus, func()) {
}))
nginx := New()
nginx.URL = srv.URL
- require.True(t, nginx.Init())
+ require.NoError(t, nginx.Init())
return nginx, srv.Close
}
@@ -555,7 +565,7 @@ func caseInvalidDataResponse(t *testing.T) (*NginxPlus, func()) {
}))
nginx := New()
nginx.URL = srv.URL
- require.True(t, nginx.Init())
+ require.NoError(t, nginx.Init())
return nginx, srv.Close
}
@@ -564,7 +574,7 @@ func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) {
t.Helper()
nginx := New()
nginx.URL = "http://127.0.0.1:65001"
- require.True(t, nginx.Init())
+ require.NoError(t, nginx.Init())
return nginx, func() {}
}
diff --git a/modules/nginxplus/testdata/config.json b/modules/nginxplus/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/nginxplus/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/nginxplus/testdata/config.yaml b/modules/nginxplus/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/nginxplus/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/nginxvts/config_schema.json b/modules/nginxvts/config_schema.json
index a4b44429f..5f5deab99 100644
--- a/modules/nginxvts/config_schema.json
+++ b/modules/nginxvts/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/nginxvts job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+    "title": "NGINX VTS collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the NGINX VTS module status page.",
+ "type": "string",
+ "default": "http://localhost/status/format/json"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/nginxvts/init.go b/modules/nginxvts/init.go
index 7ebf049ab..59896a8ef 100644
--- a/modules/nginxvts/init.go
+++ b/modules/nginxvts/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (vts NginxVTS) validateConfig() error {
+func (vts *NginxVTS) validateConfig() error {
if vts.URL == "" {
return errors.New("URL not set")
}
@@ -21,11 +21,11 @@ func (vts NginxVTS) validateConfig() error {
return nil
}
-func (vts NginxVTS) initHTTPClient() (*http.Client, error) {
+func (vts *NginxVTS) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(vts.Client)
}
-func (vts NginxVTS) initCharts() (*module.Charts, error) {
+func (vts *NginxVTS) initCharts() (*module.Charts, error) {
charts := module.Charts{}
if err := charts.Add(*mainCharts.Copy()...); err != nil {
diff --git a/modules/nginxvts/nginxvts.go b/modules/nginxvts/nginxvts.go
index 1cc3a6014..f0b40852a 100644
--- a/modules/nginxvts/nginxvts.go
+++ b/modules/nginxvts/nginxvts.go
@@ -4,6 +4,7 @@ package nginxvts
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -32,7 +33,7 @@ func New() *NginxVTS {
URL: "http://localhost/status/format/json",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -40,15 +41,21 @@ func New() *NginxVTS {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type NginxVTS struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
httpClient *http.Client
- charts *module.Charts
+}
+
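+// Configuration returns the NginxVTS collector configuration.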
+func (vts *NginxVTS) Configuration() any {
+ return vts.Config
}
func (vts *NginxVTS) Cleanup() {
@@ -58,11 +65,11 @@ func (vts *NginxVTS) Cleanup() {
vts.httpClient.CloseIdleConnections()
}
-func (vts *NginxVTS) Init() bool {
+func (vts *NginxVTS) Init() error {
err := vts.validateConfig()
if err != nil {
vts.Errorf("check configuration: %v", err)
- return false
+ return err
}
httpClient, err := vts.initHTTPClient()
@@ -74,15 +81,23 @@ func (vts *NginxVTS) Init() bool {
charts, err := vts.initCharts()
if err != nil {
vts.Errorf("init charts: %v", err)
- return false
+ return err
}
vts.charts = charts
- return true
+ return nil
}
-func (vts *NginxVTS) Check() bool {
- return len(vts.Collect()) > 0
+func (vts *NginxVTS) Check() error {
+ mx, err := vts.collect()
+ if err != nil {
+ vts.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (vts *NginxVTS) Charts() *module.Charts {
diff --git a/modules/nginxvts/nginxvts_test.go b/modules/nginxvts/nginxvts_test.go
index ef204ad75..f446f11aa 100644
--- a/modules/nginxvts/nginxvts_test.go
+++ b/modules/nginxvts/nginxvts_test.go
@@ -17,19 +17,24 @@ import (
)
var (
- v0118Response, _ = os.ReadFile("testdata/vts-v0.1.18.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer0118Response, _ = os.ReadFile("testdata/vts-v0.1.18.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v0118Response": v0118Response,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer0118Response": dataVer0118Response,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestNginxVTS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NginxVTS{}, dataConfigJSON, dataConfigYAML)
}
func TestNginxVTS_Init(t *testing.T) {
@@ -70,9 +75,9 @@ func TestNginxVTS_Init(t *testing.T) {
es.Config = test.config
if test.wantFail {
- assert.False(t, es.Init())
+ assert.Error(t, es.Init())
} else {
- assert.True(t, es.Init())
+ assert.NoError(t, es.Init())
assert.Equal(t, test.wantNumOfCharts, len(*es.Charts()))
}
})
@@ -96,9 +101,9 @@ func TestNginxVTS_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, vts.Check())
+ assert.Error(t, vts.Check())
} else {
- assert.True(t, vts.Check())
+ assert.NoError(t, vts.Check())
}
})
}
@@ -197,7 +202,7 @@ func prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxV
srv := prepareNginxVTSEndpoint()
vts.URL = srv.URL
- require.True(t, vts.Init())
+ require.NoError(t, vts.Init())
return vts, srv.Close
}
@@ -214,7 +219,7 @@ func prepareNginxVTSInvalidData(t *testing.T) (*NginxVTS, func()) {
}))
vts := New()
vts.URL = srv.URL
- require.True(t, vts.Init())
+ require.NoError(t, vts.Init())
return vts, srv.Close
}
@@ -227,7 +232,7 @@ func prepareNginxVTS404(t *testing.T) (*NginxVTS, func()) {
}))
vts := New()
vts.URL = srv.URL
- require.True(t, vts.Init())
+ require.NoError(t, vts.Init())
return vts, srv.Close
}
@@ -236,7 +241,7 @@ func prepareNginxVTSConnectionRefused(t *testing.T) (*NginxVTS, func()) {
t.Helper()
vts := New()
vts.URL = "http://127.0.0.1:18080"
- require.True(t, vts.Init())
+ require.NoError(t, vts.Init())
return vts, func() {}
}
@@ -246,7 +251,7 @@ func prepareNginxVTSEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case "/":
- _, _ = w.Write(v0118Response)
+ _, _ = w.Write(dataVer0118Response)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/nginxvts/testdata/config.json b/modules/nginxvts/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/nginxvts/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/nginxvts/testdata/config.yaml b/modules/nginxvts/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/nginxvts/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/ntpd/client.go b/modules/ntpd/client.go
index 5164c80e8..8e111cd76 100644
--- a/modules/ntpd/client.go
+++ b/modules/ntpd/client.go
@@ -10,14 +10,14 @@ import (
)
func newNTPClient(c Config) (ntpConn, error) {
- conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration)
+ conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration())
if err != nil {
return nil, err
}
client := &ntpClient{
conn: conn,
- timeout: c.Timeout.Duration,
+ timeout: c.Timeout.Duration(),
client: &control.NTPClient{Connection: conn},
}
diff --git a/modules/ntpd/config_schema.json b/modules/ntpd/config_schema.json
index ef360a7f9..50ffa7efb 100644
--- a/modules/ntpd/config_schema.json
+++ b/modules/ntpd/config_schema.json
@@ -1,26 +1,45 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/ntpd job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NTPd Collector Configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the NTPd server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the NTPd daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:123"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection, read, and write timeout duration in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "collect_peers": {
+ "title": "Collect peers",
+ "description": "Determines whether metrics from NTP peers will be collected.",
+ "type": "boolean"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "collect_peers": {
- "type": "boolean"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/ntpd/metadata.yaml b/modules/ntpd/metadata.yaml
index 3b968f20c..46178b031 100644
--- a/modules/ntpd/metadata.yaml
+++ b/modules/ntpd/metadata.yaml
@@ -67,7 +67,7 @@ modules:
required: true
- name: timeout
description: Connection/read/write timeout.
- default_value: 3
+ default_value: 1
required: false
- name: collect_peers
description: Determines whether peer metrics will be collected.
diff --git a/modules/ntpd/ntpd.go b/modules/ntpd/ntpd.go
index 8bbc0ba4f..738ca67c6 100644
--- a/modules/ntpd/ntpd.go
+++ b/modules/ntpd/ntpd.go
@@ -4,6 +4,8 @@ package ntpd
import (
_ "embed"
+ "errors"
+ "fmt"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -25,7 +27,7 @@ func New() *NTPd {
return &NTPd{
Config: Config{
Address: "127.0.0.1:123",
- Timeout: web.Duration{Duration: time.Second * 3},
+ Timeout: web.Duration(time.Second),
CollectPeers: false,
},
charts: systemCharts.Copy(),
@@ -36,20 +38,21 @@ func New() *NTPd {
}
type Config struct {
- Address string `yaml:"address"`
- Timeout web.Duration `yaml:"timeout"`
- CollectPeers bool `yaml:"collect_peers"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ CollectPeers bool `yaml:"collect_peers" json:"collect_peers"`
}
type (
NTPd struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- newClient func(c Config) (ntpConn, error)
client ntpConn
+ newClient func(c Config) (ntpConn, error)
findPeersTime time.Time
findPeersEvery time.Duration
@@ -65,26 +68,38 @@ type (
}
)
-func (n *NTPd) Init() bool {
+func (n *NTPd) Configuration() any {
+ return n.Config
+}
+
+func (n *NTPd) Init() error {
if n.Address == "" {
n.Error("config validation: 'address' can not be empty")
- return false
+ return errors.New("address not set")
}
txt := "0.0.0.0 127.0.0.0/8"
r, err := iprange.ParseRanges(txt)
if err != nil {
- n.Errorf("error on parse ip range '%s': %v", txt, err)
- return false
+ n.Errorf("error on parsing ip range '%s': %v", txt, err)
+ return fmt.Errorf("error on parsing ip range '%s': %v", txt, err)
}
n.peerIPAddrFilter = r
- return true
+ return nil
}
-func (n *NTPd) Check() bool {
- return len(n.Collect()) > 0
+func (n *NTPd) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (n *NTPd) Charts() *module.Charts {
diff --git a/modules/ntpd/ntpd_test.go b/modules/ntpd/ntpd_test.go
index 481d2d7e9..93f91eda2 100644
--- a/modules/ntpd/ntpd_test.go
+++ b/modules/ntpd/ntpd_test.go
@@ -5,12 +5,33 @@ package ntpd
import (
"errors"
"fmt"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestNTPd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NTPd{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNTPd_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -33,9 +54,9 @@ func TestNTPd_Init(t *testing.T) {
n.Config = test.config
if test.wantFail {
- assert.False(t, n.Init())
+ assert.Error(t, n.Init())
} else {
- assert.True(t, n.Init())
+ assert.NoError(t, n.Init())
}
})
}
@@ -56,15 +77,15 @@ func TestNTPd_Cleanup(t *testing.T) {
},
"after Init": {
wantClose: false,
- prepare: func(n *NTPd) { n.Init() },
+ prepare: func(n *NTPd) { _ = n.Init() },
},
"after Check": {
wantClose: true,
- prepare: func(n *NTPd) { n.Init(); n.Check() },
+ prepare: func(n *NTPd) { _ = n.Init(); _ = n.Check() },
},
"after Collect": {
wantClose: true,
- prepare: func(n *NTPd) { n.Init(); n.Collect() },
+ prepare: func(n *NTPd) { _ = n.Init(); n.Collect() },
},
}
@@ -116,12 +137,12 @@ func TestNTPd_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
n := test.prepare()
- require.True(t, n.Init())
+ require.NoError(t, n.Init())
if test.wantFail {
- assert.False(t, n.Check())
+ assert.Error(t, n.Check())
} else {
- assert.True(t, n.Check())
+ assert.NoError(t, n.Check())
}
})
}
@@ -237,7 +258,7 @@ func TestNTPd_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
n := test.prepare()
- require.True(t, n.Init())
+ require.NoError(t, n.Init())
_ = n.Check()
mx := n.Collect()
diff --git a/modules/ntpd/testdata/config.json b/modules/ntpd/testdata/config.json
new file mode 100644
index 000000000..fc8d6844f
--- /dev/null
+++ b/modules/ntpd/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "collect_peers": true
+}
diff --git a/modules/ntpd/testdata/config.yaml b/modules/ntpd/testdata/config.yaml
new file mode 100644
index 000000000..94cee8526
--- /dev/null
+++ b/modules/ntpd/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+collect_peers: yes
diff --git a/modules/nvidia_smi/config_schema.json b/modules/nvidia_smi/config_schema.json
index fc5b38e08..d921b436d 100644
--- a/modules/nvidia_smi/config_schema.json
+++ b/modules/nvidia_smi/config_schema.json
@@ -1,25 +1,46 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/nvidia_smi job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NVIDIA SMI Collector Configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the NVIDIA SMI.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the 'nvidia-smi' binary. The default path is 'nvidia-smi', and the executable is looked for in the directories specified in the PATH environment variable.",
+ "type": "string",
+ "default": "nvidia-smi"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the 'nvidia-smi' binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 10
+ },
+ "use_csv_format": {
+ "title": "Use CSV format",
+ "description": "Determines the format used for requesting GPU information. Set to 'true' for CSV format, 'false' for XML format.",
+ "type": "boolean",
+ "default": true
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "binary_path": {
- "type": "string"
+ "required": [
+ "binary_path"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "use_csv_format": {
- "type": "boolean"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name"
- ]
+ }
}
diff --git a/modules/nvidia_smi/exec.go b/modules/nvidia_smi/exec.go
index 93e23057b..c4f1e3f2c 100644
--- a/modules/nvidia_smi/exec.go
+++ b/modules/nvidia_smi/exec.go
@@ -16,7 +16,7 @@ import (
func newNvidiaSMIExec(path string, cfg Config, log *logger.Logger) (*nvidiaSMIExec, error) {
return &nvidiaSMIExec{
binPath: path,
- timeout: cfg.Timeout.Duration,
+ timeout: cfg.Timeout.Duration(),
Logger: log,
}, nil
}
diff --git a/modules/nvidia_smi/nvidia_smi.go b/modules/nvidia_smi/nvidia_smi.go
index 1370b4335..a05d2b0d7 100644
--- a/modules/nvidia_smi/nvidia_smi.go
+++ b/modules/nvidia_smi/nvidia_smi.go
@@ -4,6 +4,7 @@ package nvidia_smi
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -27,7 +28,7 @@ func init() {
func New() *NvidiaSMI {
return &NvidiaSMI{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 10},
+ Timeout: web.Duration(time.Second * 10),
UseCSVFormat: true,
},
binName: "nvidia-smi",
@@ -39,20 +40,21 @@ func New() *NvidiaSMI {
}
type Config struct {
- Timeout web.Duration
- BinaryPath string `yaml:"binary_path"`
- UseCSVFormat bool `yaml:"use_csv_format"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ BinaryPath string `yaml:"binary_path" json:"binary_path"`
+ UseCSVFormat bool `yaml:"use_csv_format" json:"use_csv_format"`
}
type (
NvidiaSMI struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- binName string
exec nvidiaSMI
+ binName string
gpuQueryProperties []string
@@ -66,21 +68,33 @@ type (
}
)
-func (nv *NvidiaSMI) Init() bool {
+func (nv *NvidiaSMI) Configuration() any {
+ return nv.Config
+}
+
+func (nv *NvidiaSMI) Init() error {
if nv.exec == nil {
smi, err := nv.initNvidiaSMIExec()
if err != nil {
nv.Error(err)
- return false
+ return err
}
nv.exec = smi
}
- return true
+ return nil
}
-func (nv *NvidiaSMI) Check() bool {
- return len(nv.Collect()) > 0
+func (nv *NvidiaSMI) Check() error {
+ mx, err := nv.collect()
+ if err != nil {
+ nv.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (nv *NvidiaSMI) Charts() *module.Charts {
diff --git a/modules/nvidia_smi/nvidia_smi_test.go b/modules/nvidia_smi/nvidia_smi_test.go
index cdd7742fd..2814c0d76 100644
--- a/modules/nvidia_smi/nvidia_smi_test.go
+++ b/modules/nvidia_smi/nvidia_smi_test.go
@@ -8,11 +8,16 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataXMLRTX2080Win, _ = os.ReadFile("testdata/rtx-2080-win.xml")
dataXMLRTX4090Driver535, _ = os.ReadFile("testdata/rtx-4090-driver-535.xml")
dataXMLRTX3060, _ = os.ReadFile("testdata/rtx-3060.xml")
@@ -26,20 +31,24 @@ var (
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
"dataXMLRTX2080Win": dataXMLRTX2080Win,
"dataXMLRTX4090Driver535": dataXMLRTX4090Driver535,
"dataXMLRTX3060": dataXMLRTX3060,
"dataXMLTeslaP100": dataXMLTeslaP100,
-
- "dataXMLA100SXM4MIG": dataXMLA100SXM4MIG,
-
- "dataHelpQueryGPU": dataHelpQueryGPU,
- "dataCSVTeslaP100": dataCSVTeslaP100,
+ "dataXMLA100SXM4MIG": dataXMLA100SXM4MIG,
+ "dataHelpQueryGPU": dataHelpQueryGPU,
+ "dataCSVTeslaP100": dataCSVTeslaP100,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestNvidiaSMI_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NvidiaSMI{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNvidiaSMI_Init(t *testing.T) {
tests := map[string]struct {
prepare func(nv *NvidiaSMI)
@@ -60,9 +69,9 @@ func TestNvidiaSMI_Init(t *testing.T) {
test.prepare(nv)
if test.wantFail {
- assert.False(t, nv.Init())
+ assert.Error(t, nv.Init())
} else {
- assert.True(t, nv.Init())
+ assert.NoError(t, nv.Init())
}
})
}
@@ -118,9 +127,9 @@ func TestNvidiaSMI_Check(t *testing.T) {
test.prepare(nv)
if test.wantFail {
- assert.False(t, nv.Check())
+ assert.Error(t, nv.Check())
} else {
- assert.True(t, nv.Check())
+ assert.NoError(t, nv.Check())
}
})
}
diff --git a/modules/nvidia_smi/testdata/config.json b/modules/nvidia_smi/testdata/config.json
new file mode 100644
index 000000000..a251e326a
--- /dev/null
+++ b/modules/nvidia_smi/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok",
+ "use_csv_format": true
+}
diff --git a/modules/nvidia_smi/testdata/config.yaml b/modules/nvidia_smi/testdata/config.yaml
new file mode 100644
index 000000000..0b580dbcb
--- /dev/null
+++ b/modules/nvidia_smi/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
+use_csv_format: yes
diff --git a/modules/nvme/config_schema.json b/modules/nvme/config_schema.json
index fcd2869d6..3a894bfc9 100644
--- a/modules/nvme/config_schema.json
+++ b/modules/nvme/config_schema.json
@@ -1,22 +1,40 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/nvme job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "NVMe Collector Configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which NVMe metrics are collected.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "binary_path": {
+ "title": "Binary path",
+ "description": "Path to the 'nvme' binary. The default path is 'nvme', and the executable is looked for in the directories specified in the PATH environment variable.",
+ "type": "string",
+ "default": "nvme"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Timeout for executing the 'nvme' binary, specified in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 10
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "required": [
+ "binary_path"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "binary_path": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name"
- ]
+ }
}
diff --git a/modules/nvme/init.go b/modules/nvme/init.go
index 70988031c..44ff90f4e 100644
--- a/modules/nvme/init.go
+++ b/modules/nvme/init.go
@@ -29,7 +29,7 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) {
n.Debug("using ndsudo")
return &nvmeCLIExec{
ndsudoPath: ndsudoPath,
- timeout: n.Timeout.Duration,
+ timeout: n.Timeout.Duration(),
}, nil
}
}
@@ -51,14 +51,14 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) {
}
if sudoPath != "" {
- ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration)
+ ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration())
defer cancel1()
if _, err := exec.CommandContext(ctx1, sudoPath, "-n", "-v").Output(); err != nil {
return nil, fmt.Errorf("can not run sudo on this host: %v", err)
}
- ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration)
+ ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration())
defer cancel2()
if _, err := exec.CommandContext(ctx2, sudoPath, "-n", "-l", nvmePath).Output(); err != nil {
@@ -69,6 +69,6 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) {
return &nvmeCLIExec{
sudoPath: sudoPath,
nvmePath: nvmePath,
- timeout: n.Timeout.Duration,
+ timeout: n.Timeout.Duration(),
}, nil
}
diff --git a/modules/nvme/nvme.go b/modules/nvme/nvme.go
index d8f86869a..a213ee4ed 100644
--- a/modules/nvme/nvme.go
+++ b/modules/nvme/nvme.go
@@ -4,6 +4,7 @@ package nvme
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -27,8 +28,9 @@ func New() *NVMe {
return &NVMe{
Config: Config{
BinaryPath: "nvme",
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
},
+
charts: &module.Charts{},
devicePaths: make(map[string]bool),
listDevicesEvery: time.Minute * 10,
@@ -37,14 +39,15 @@ func New() *NVMe {
}
type Config struct {
- Timeout web.Duration
- BinaryPath string `yaml:"binary_path"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ BinaryPath string `yaml:"binary_path" json:"binary_path"`
}
type (
NVMe struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
@@ -61,24 +64,36 @@ type (
}
)
-func (n *NVMe) Init() bool {
+func (n *NVMe) Configuration() any {
+ return n.Config
+}
+
+func (n *NVMe) Init() error {
if err := n.validateConfig(); err != nil {
n.Errorf("config validation: %v", err)
- return false
+ return err
}
v, err := n.initNVMeCLIExec()
if err != nil {
n.Errorf("init nvme-cli exec: %v", err)
- return false
+ return err
}
n.exec = v
- return true
+ return nil
}
-func (n *NVMe) Check() bool {
- return len(n.Collect()) > 0
+func (n *NVMe) Check() error {
+ mx, err := n.collect()
+ if err != nil {
+ n.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (n *NVMe) Charts() *module.Charts {
diff --git a/modules/nvme/nvme_test.go b/modules/nvme/nvme_test.go
index 26c55182b..e70f0cfec 100644
--- a/modules/nvme/nvme_test.go
+++ b/modules/nvme/nvme_test.go
@@ -9,11 +9,16 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataNVMeListJSON, _ = os.ReadFile("testdata/nvme-list.json")
dataNVMeListEmptyJSON, _ = os.ReadFile("testdata/nvme-list-empty.json")
dataNVMeSmartLogJSON, _ = os.ReadFile("testdata/nvme-smart-log.json")
@@ -23,15 +28,21 @@ var (
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
"dataNVMeListJSON": dataNVMeListJSON,
"dataNVMeListEmptyJSON": dataNVMeListEmptyJSON,
"dataNVMeSmartLogStringJSON": dataNVMeSmartLogStringJSON,
"dataNVMeSmartLogFloatJSON": dataNVMeSmartLogFloatJSON,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestNVMe_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &NVMe{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNVMe_Init(t *testing.T) {
tests := map[string]struct {
prepare func(n *NVMe)
@@ -58,9 +69,9 @@ func TestNVMe_Init(t *testing.T) {
test.prepare(nv)
if test.wantFail {
- assert.False(t, nv.Init())
+ assert.Error(t, nv.Init())
} else {
- assert.True(t, nv.Init())
+ assert.NoError(t, nv.Init())
}
})
}
@@ -104,9 +115,9 @@ func TestNVMe_Check(t *testing.T) {
test.prepare(n)
if test.wantFail {
- assert.False(t, n.Check())
+ assert.Error(t, n.Check())
} else {
- assert.True(t, n.Check())
+ assert.NoError(t, n.Check())
}
})
}
diff --git a/modules/nvme/testdata/config.json b/modules/nvme/testdata/config.json
new file mode 100644
index 000000000..095713193
--- /dev/null
+++ b/modules/nvme/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "binary_path": "ok"
+}
diff --git a/modules/nvme/testdata/config.yaml b/modules/nvme/testdata/config.yaml
new file mode 100644
index 000000000..baf3bcd0b
--- /dev/null
+++ b/modules/nvme/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+timeout: 123.123
+binary_path: "ok"
diff --git a/modules/openvpn/config_schema.json b/modules/openvpn/config_schema.json
index db6442db9..b140ec436 100644
--- a/modules/openvpn/config_schema.json
+++ b/modules/openvpn/config_schema.json
@@ -1,52 +1,86 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/openvpn job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "address": {
- "type": "string"
- },
- "connect_timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "OpenVPN collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the OpenVPN server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the OpenVPN Management Interface listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:123"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection, read, and write timeout duration in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "per_user_stats": {
+ "title": "User selector",
+ "description": "Configuration for monitoring specific users. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 or exclude2). Patterns follow the syntax of matcher patterns.",
+ "type": "object",
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include users whose usernames match any of the specified inclusion patterns.",
+ "type": "array",
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude users whose usernames match any of the specified exclusion patterns.",
+ "type": "array",
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
},
- "read_timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "write_timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "per_user_stats": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
},
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ {
+ "title": "User stats",
+ "fields": [
+ "per_user_stats"
+ ]
}
- }
+ ]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/openvpn/init.go b/modules/openvpn/init.go
new file mode 100644
index 000000000..9168ad57b
--- /dev/null
+++ b/modules/openvpn/init.go
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package openvpn
+
+import (
+ "github.com/netdata/go.d.plugin/modules/openvpn/client"
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+ "github.com/netdata/go.d.plugin/pkg/socket"
+)
+
+func (o *OpenVPN) validateConfig() error {
+ return nil
+}
+
+func (o *OpenVPN) initPerUserMatcher() (matcher.Matcher, error) {
+ if o.PerUserStats.Empty() {
+ return nil, nil
+ }
+ return o.PerUserStats.Parse()
+}
+
+func (o *OpenVPN) initClient() *client.Client {
+ config := socket.Config{
+ Address: o.Address,
+ ConnectTimeout: o.Timeout.Duration(),
+ ReadTimeout: o.Timeout.Duration(),
+ WriteTimeout: o.Timeout.Duration(),
+ }
+ return &client.Client{Client: socket.New(config)}
+}
diff --git a/modules/openvpn/metadata.yaml b/modules/openvpn/metadata.yaml
index 9d3e2e330..a91f10f70 100644
--- a/modules/openvpn/metadata.yaml
+++ b/modules/openvpn/metadata.yaml
@@ -72,6 +72,10 @@ modules:
description: Server address in IP:PORT format.
default_value: 127.0.0.1:7505
required: true
+ - name: timeout
+ description: Connection, read, and write timeout duration in seconds. The timeout includes name resolution.
+ default_value: 1
+ required: false
- name: per_user_stats
description: User selector. Determines which user metrics will be collected.
default_value: ""
@@ -92,18 +96,6 @@ modules:
- pattern3
- pattern4
```
- - name: connect_timeout
- description: Connection timeout in seconds. The timeout includes name resolution, if required.
- default_value: 2
- required: false
- - name: read_timeout
- description: Read timeout in seconds. Sets deadline for read calls.
- default_value: 2
- required: false
- - name: write_timeout
- description: Write timeout in seconds. Sets deadline for write calls.
- default_value: 2
- required: false
examples:
folding:
title: Config
diff --git a/modules/openvpn/openvpn.go b/modules/openvpn/openvpn.go
index 0a6ccbb81..19417873b 100644
--- a/modules/openvpn/openvpn.go
+++ b/modules/openvpn/openvpn.go
@@ -6,19 +6,11 @@ import (
_ "embed"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/openvpn/client"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/socket"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-const (
- defaultAddress = "127.0.0.1:7505"
- defaultConnectTimeout = time.Second * 2
- defaultReadTimeout = time.Second * 2
- defaultWriteTimeout = time.Second * 2
)
//go:embed "config_schema.json"
@@ -27,92 +19,77 @@ var configSchema string
func init() {
module.Register("openvpn", module.Creator{
JobConfigSchema: configSchema,
- Defaults: module.Defaults{
- Disabled: true,
- },
- Create: func() module.Module { return New() },
+ Create: func() module.Module { return New() },
})
}
-// New creates OpenVPN with default values.
func New() *OpenVPN {
- config := Config{
- Address: defaultAddress,
- ConnectTimeout: web.Duration{Duration: defaultConnectTimeout},
- ReadTimeout: web.Duration{Duration: defaultReadTimeout},
- WriteTimeout: web.Duration{Duration: defaultWriteTimeout},
- }
return &OpenVPN{
- Config: config,
+ Config: Config{
+ Address: "127.0.0.1:7505",
+ Timeout: web.Duration(time.Second),
+ },
+
charts: charts.Copy(),
collectedUsers: make(map[string]bool),
}
}
-// Config is the OpenVPN module configuration.
type Config struct {
- Address string
- ConnectTimeout web.Duration `yaml:"connect_timeout"`
- ReadTimeout web.Duration `yaml:"read_timeout"`
- WriteTimeout web.Duration `yaml:"write_timeout"`
- PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ PerUserStats matcher.SimpleExpr `yaml:"per_user_stats" json:"per_user_stats"`
}
-type openVPNClient interface {
- socket.Client
- Version() (*client.Version, error)
- LoadStats() (*client.LoadStats, error)
- Users() (client.Users, error)
-}
+type (
+ OpenVPN struct {
+ module.Base
+ Config `yaml:",inline" json:""`
-// OpenVPN OpenVPN module.
-type OpenVPN struct {
- module.Base
- Config `yaml:",inline"`
- client openVPNClient
- charts *Charts
- collectedUsers map[string]bool
- perUserMatcher matcher.Matcher
-}
+ charts *Charts
-// Cleanup makes cleanup.
-func (o *OpenVPN) Cleanup() {
- if o.client == nil {
- return
+ client openVPNClient
+
+ collectedUsers map[string]bool
+ perUserMatcher matcher.Matcher
}
- _ = o.client.Disconnect()
+ openVPNClient interface {
+ socket.Client
+ Version() (*client.Version, error)
+ LoadStats() (*client.LoadStats, error)
+ Users() (client.Users, error)
+ }
+)
+
+func (o *OpenVPN) Configuration() any {
+ return o.Config
}
-// Init makes initialization.
-func (o *OpenVPN) Init() bool {
- if !o.PerUserStats.Empty() {
- m, err := o.PerUserStats.Parse()
- if err != nil {
- o.Errorf("error on creating per user stats matcher : %v", err)
- return false
- }
- o.perUserMatcher = matcher.WithCache(m)
+func (o *OpenVPN) Init() error {
+ if err := o.validateConfig(); err != nil {
+ o.Error(err)
+ return err
}
- config := socket.Config{
- Address: o.Address,
- ConnectTimeout: o.ConnectTimeout.Duration,
- ReadTimeout: o.ReadTimeout.Duration,
- WriteTimeout: o.WriteTimeout.Duration,
+ m, err := o.initPerUserMatcher()
+ if err != nil {
+ o.Error(err)
+ return err
}
- o.client = &client.Client{Client: socket.New(config)}
+ o.perUserMatcher = m
- o.Infof("using address: %s, connect timeout: %s, read timeout: %s, write timeout: %s",
- o.Address, o.ConnectTimeout.Duration, o.ReadTimeout.Duration, o.WriteTimeout.Duration)
+ o.client = o.initClient()
- return true
+ o.Infof("using address: %s, timeout: %s", o.Address, o.Timeout)
+
+ return nil
}
-// Check makes check.
-func (o *OpenVPN) Check() bool {
+func (o *OpenVPN) Check() error {
if err := o.client.Connect(); err != nil {
o.Error(err)
- return false
+ return err
}
defer func() { _ = o.client.Disconnect() }()
@@ -120,17 +97,16 @@ func (o *OpenVPN) Check() bool {
if err != nil {
o.Error(err)
o.Cleanup()
- return false
+ return err
}
o.Infof("connected to OpenVPN v%d.%d.%d, Management v%d", ver.Major, ver.Minor, ver.Patch, ver.Management)
- return true
+
+ return nil
}
-// Charts creates Charts.
-func (o OpenVPN) Charts() *Charts { return o.charts }
+func (o *OpenVPN) Charts() *Charts { return o.charts }
-// Collect collects metrics.
func (o *OpenVPN) Collect() map[string]int64 {
mx, err := o.collect()
if err != nil {
@@ -142,3 +118,10 @@ func (o *OpenVPN) Collect() map[string]int64 {
}
return mx
}
+
+func (o *OpenVPN) Cleanup() {
+ if o.client == nil {
+ return
+ }
+ _ = o.client.Disconnect()
+}
diff --git a/modules/openvpn/openvpn_test.go b/modules/openvpn/openvpn_test.go
index 02fa1a602..cedb5d9ab 100644
--- a/modules/openvpn/openvpn_test.go
+++ b/modules/openvpn/openvpn_test.go
@@ -3,61 +3,46 @@
package openvpn
import (
+ "os"
"testing"
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/openvpn/client"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/socket"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testVersion = client.Version{Major: 1, Minor: 1, Patch: 1, Management: 1}
- testLoadStats = client.LoadStats{NumOfClients: 1, BytesIn: 1, BytesOut: 1}
- testUsers = client.Users{{
- CommonName: "common_name",
- RealAddress: "1.2.3.4:4321",
- VirtualAddress: "1.2.3.4",
- BytesReceived: 1,
- BytesSent: 2,
- ConnectedSince: 3,
- Username: "name",
- }}
- testUsersUNDEF = client.Users{{
- CommonName: "common_name",
- RealAddress: "1.2.3.4:4321",
- VirtualAddress: "1.2.3.4",
- BytesReceived: 1,
- BytesSent: 2,
- ConnectedSince: 3,
- Username: "UNDEF",
- }}
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultAddress, job.Address)
- assert.Equal(t, defaultConnectTimeout, job.ConnectTimeout.Duration)
- assert.Equal(t, defaultReadTimeout, job.ReadTimeout.Duration)
- assert.Equal(t, defaultWriteTimeout, job.WriteTimeout.Duration)
- assert.NotNil(t, job.charts)
- assert.NotNil(t, job.collectedUsers)
+func TestOpenVPN_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &OpenVPN{}, dataConfigJSON, dataConfigYAML)
}
func TestOpenVPN_Init(t *testing.T) {
- assert.True(t, New().Init())
+ assert.NoError(t, New().Init())
}
func TestOpenVPN_Check(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.client = prepareMockOpenVPNClient()
- require.True(t, job.Check())
+ require.NoError(t, job.Check())
}
func TestOpenVPN_Charts(t *testing.T) {
@@ -68,19 +53,19 @@ func TestOpenVPN_Cleanup(t *testing.T) {
job := New()
assert.NotPanics(t, job.Cleanup)
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.client = prepareMockOpenVPNClient()
- require.True(t, job.Check())
+ require.NoError(t, job.Check())
job.Cleanup()
}
func TestOpenVPN_Collect(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.perUserMatcher = matcher.TRUE()
job.client = prepareMockOpenVPNClient()
- require.True(t, job.Check())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"bytes_in": 1,
@@ -99,12 +84,12 @@ func TestOpenVPN_Collect(t *testing.T) {
func TestOpenVPN_Collect_UNDEFUsername(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.perUserMatcher = matcher.TRUE()
cl := prepareMockOpenVPNClient()
cl.users = testUsersUNDEF
job.client = cl
- require.True(t, job.Check())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"bytes_in": 1,
@@ -134,12 +119,35 @@ type mockOpenVPNClient struct {
users client.Users
}
-func (m *mockOpenVPNClient) Connect() error { return nil }
-func (m *mockOpenVPNClient) Disconnect() error { return nil }
-func (m mockOpenVPNClient) Version() (*client.Version, error) { return &m.version, nil }
-func (m mockOpenVPNClient) LoadStats() (*client.LoadStats, error) { return &m.loadStats, nil }
-func (m mockOpenVPNClient) Users() (client.Users, error) { return m.users, nil }
+func (m *mockOpenVPNClient) Connect() error { return nil }
+func (m *mockOpenVPNClient) Disconnect() error { return nil }
+func (m *mockOpenVPNClient) Version() (*client.Version, error) { return &m.version, nil }
+func (m *mockOpenVPNClient) LoadStats() (*client.LoadStats, error) { return &m.loadStats, nil }
+func (m *mockOpenVPNClient) Users() (client.Users, error) { return m.users, nil }
func (m *mockOpenVPNClient) Command(_ string, _ socket.Processor) error {
// mocks are done on the individual commands. e.g. in Version() below
panic("should be called in the mock")
}
+
+var (
+ testVersion = client.Version{Major: 1, Minor: 1, Patch: 1, Management: 1}
+ testLoadStats = client.LoadStats{NumOfClients: 1, BytesIn: 1, BytesOut: 1}
+ testUsers = client.Users{{
+ CommonName: "common_name",
+ RealAddress: "1.2.3.4:4321",
+ VirtualAddress: "1.2.3.4",
+ BytesReceived: 1,
+ BytesSent: 2,
+ ConnectedSince: 3,
+ Username: "name",
+ }}
+ testUsersUNDEF = client.Users{{
+ CommonName: "common_name",
+ RealAddress: "1.2.3.4:4321",
+ VirtualAddress: "1.2.3.4",
+ BytesReceived: 1,
+ BytesSent: 2,
+ ConnectedSince: 3,
+ Username: "UNDEF",
+ }}
+)
diff --git a/modules/openvpn/testdata/config.json b/modules/openvpn/testdata/config.json
new file mode 100644
index 000000000..30411ebf3
--- /dev/null
+++ b/modules/openvpn/testdata/config.json
@@ -0,0 +1,13 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "per_user_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/openvpn/testdata/config.yaml b/modules/openvpn/testdata/config.yaml
new file mode 100644
index 000000000..22296ce56
--- /dev/null
+++ b/modules/openvpn/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+per_user_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/modules/openvpn_status_log/config_schema.json b/modules/openvpn_status_log/config_schema.json
index 904da56c0..1647163bc 100644
--- a/modules/openvpn_status_log/config_schema.json
+++ b/modules/openvpn_status_log/config_schema.json
@@ -1,34 +1,78 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/openvpn_status_log job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "OpenVPN status log collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the OpenVPN status log file.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "log_path": {
+ "title": "Log path",
+ "description": "Path to status log file.",
+ "type": "string",
+ "default": "/var/log/openvpn/status.log"
+ },
+ "per_user_stats": {
+ "title": "User selector",
+ "description": "Configuration for monitoring specific users. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 or exclude2). Patterns follow the syntax of matcher patterns.",
+ "type": "object",
+ "properties": {
+ "includes": {
+ "title": "Include",
+ "description": "Include users whose usernames match any of the specified inclusion patterns.",
+ "type": "array",
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "excludes": {
+ "title": "Exclude",
+ "description": "Exclude users whose usernames match any of the specified exclusion patterns.",
+ "type": "array",
+ "items": {
+ "title": "Username pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ }
+ }
+ }
},
- "log_path": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "per_user_stats": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "log_path"
+ ]
},
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ {
+ "title": "User stats",
+ "fields": [
+ "per_user_stats"
+ ]
}
- }
+ ]
}
- },
- "required": [
- "name",
- "log_path"
- ]
+ }
}
diff --git a/modules/openvpn_status_log/init.go b/modules/openvpn_status_log/init.go
index 9bd34a510..5e1521e5e 100644
--- a/modules/openvpn_status_log/init.go
+++ b/modules/openvpn_status_log/init.go
@@ -7,14 +7,14 @@ import (
"github.com/netdata/go.d.plugin/pkg/matcher"
)
-func (o OpenVPNStatusLog) validateConfig() error {
+func (o *OpenVPNStatusLog) validateConfig() error {
if o.LogPath == "" {
return errors.New("empty 'log_path'")
}
return nil
}
-func (o OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) {
+func (o *OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) {
if o.PerUserStats.Empty() {
return nil, nil
}
diff --git a/modules/openvpn_status_log/openvpn.go b/modules/openvpn_status_log/openvpn.go
index dc9e7340b..560a09219 100644
--- a/modules/openvpn_status_log/openvpn.go
+++ b/modules/openvpn_status_log/openvpn.go
@@ -4,6 +4,7 @@ package openvpn_status_log
import (
_ "embed"
+ "errors"
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
@@ -20,56 +21,66 @@ func init() {
}
func New() *OpenVPNStatusLog {
- config := Config{
- LogPath: "/var/log/openvpn/status.log",
- }
return &OpenVPNStatusLog{
- Config: config,
+ Config: Config{
+ LogPath: "/var/log/openvpn/status.log",
+ },
charts: charts.Copy(),
collectedUsers: make(map[string]bool),
}
}
type Config struct {
- LogPath string `yaml:"log_path"`
- PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ LogPath string `yaml:"log_path" json:"log_path"`
+ PerUserStats matcher.SimpleExpr `yaml:"per_user_stats" json:"per_user_stats"`
}
type OpenVPNStatusLog struct {
module.Base
-
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- collectedUsers map[string]bool
perUserMatcher matcher.Matcher
+ collectedUsers map[string]bool
}
-func (o *OpenVPNStatusLog) Init() bool {
+func (o *OpenVPNStatusLog) Configuration() any {
+ return o.Config
+}
+
+func (o *OpenVPNStatusLog) Init() error {
if err := o.validateConfig(); err != nil {
o.Errorf("error on validating config: %v", err)
- return false
+ return err
}
m, err := o.initPerUserStatsMatcher()
if err != nil {
o.Errorf("error on creating 'per_user_stats' matcher: %v", err)
- return false
+ return err
}
-
if m != nil {
o.perUserMatcher = m
}
- return true
+ return nil
}
-func (o *OpenVPNStatusLog) Check() bool {
- return len(o.Collect()) > 0
+func (o *OpenVPNStatusLog) Check() error {
+ mx, err := o.collect()
+ if err != nil {
+ o.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-func (o OpenVPNStatusLog) Charts() *module.Charts {
+func (o *OpenVPNStatusLog) Charts() *module.Charts {
return o.charts
}
diff --git a/modules/openvpn_status_log/openvpn_test.go b/modules/openvpn_status_log/openvpn_test.go
index d54d27824..6e2d77e2a 100644
--- a/modules/openvpn_status_log/openvpn_test.go
+++ b/modules/openvpn_status_log/openvpn_test.go
@@ -3,13 +3,15 @@
package openvpn_status_log
import (
+ "os"
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/netdata/go.d.plugin/pkg/matcher"
)
const (
@@ -24,7 +26,22 @@ const (
pathStatusVersion3NoClients = "testdata/v2.5.1/version3-no-clients.txt"
)
-func TestNew(t *testing.T) {
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestOpenVPNStatusLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &OpenVPNStatusLog{}, dataConfigJSON, dataConfigYAML)
}
func TestOpenVPNStatusLog_Init(t *testing.T) {
@@ -49,9 +66,9 @@ func TestOpenVPNStatusLog_Init(t *testing.T) {
ovpn.Config = test.config
if test.wantFail {
- assert.False(t, ovpn.Init())
+ assert.Error(t, ovpn.Init())
} else {
- assert.True(t, ovpn.Init())
+ assert.NoError(t, ovpn.Init())
}
})
}
@@ -76,12 +93,12 @@ func TestOpenVPNStatusLog_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
ovpn := test.prepare()
- require.True(t, ovpn.Init())
+ require.NoError(t, ovpn.Init())
if test.wantFail {
- assert.False(t, ovpn.Check())
+ assert.Error(t, ovpn.Check())
} else {
- assert.True(t, ovpn.Check())
+ assert.NoError(t, ovpn.Check())
}
})
}
@@ -114,7 +131,7 @@ func TestOpenVPNStatusLog_Charts(t *testing.T) {
t.Run(name, func(t *testing.T) {
ovpn := test.prepare()
- require.True(t, ovpn.Init())
+ require.NoError(t, ovpn.Init())
_ = ovpn.Check()
_ = ovpn.Collect()
@@ -240,7 +257,7 @@ func TestOpenVPNStatusLog_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
ovpn := test.prepare()
- require.True(t, ovpn.Init())
+ require.NoError(t, ovpn.Init())
_ = ovpn.Check()
collected := ovpn.Collect()
diff --git a/modules/openvpn_status_log/testdata/config.json b/modules/openvpn_status_log/testdata/config.json
new file mode 100644
index 000000000..078a1ae56
--- /dev/null
+++ b/modules/openvpn_status_log/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "log_path": "ok",
+ "per_user_stats": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/openvpn_status_log/testdata/config.yaml b/modules/openvpn_status_log/testdata/config.yaml
new file mode 100644
index 000000000..1a27ab974
--- /dev/null
+++ b/modules/openvpn_status_log/testdata/config.yaml
@@ -0,0 +1,7 @@
+update_every: 123
+log_path: "ok"
+per_user_stats:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/modules/pgbouncer/collect.go b/modules/pgbouncer/collect.go
index 40dbddb9f..c0e4bf2da 100644
--- a/modules/pgbouncer/collect.go
+++ b/modules/pgbouncer/collect.go
@@ -236,7 +236,7 @@ func (p *PgBouncer) queryVersion() (*semver.Version, error) {
p.Debugf("executing query: %v", q)
var resp string
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
if err := p.db.QueryRowContext(ctx, q).Scan(&resp); err != nil {
return nil, err
@@ -281,7 +281,7 @@ func (p *PgBouncer) openConnection() error {
}
func (p *PgBouncer) collectQuery(query string, assign func(column, value string)) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
rows, err := p.db.QueryContext(ctx, query)
if err != nil {
diff --git a/modules/pgbouncer/config_schema.json b/modules/pgbouncer/config_schema.json
index 16cf22ecb..1e4d5a9f8 100644
--- a/modules/pgbouncer/config_schema.json
+++ b/modules/pgbouncer/config_schema.json
@@ -1,23 +1,37 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/pgbouncer job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "dsn": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PgBouncer collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the PgBouncer server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "PgBouncer server Data Source Name (DSN) specifying the connection details.",
+ "type": "string",
+ "default": "postgres://netdata:password@127.0.0.1:6432/pgbouncer"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for queries executed against the PgBouncer server.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "dsn"
- ]
+ }
}
diff --git a/modules/pgbouncer/pgbouncer.go b/modules/pgbouncer/pgbouncer.go
index ebb11327b..5f6eae361 100644
--- a/modules/pgbouncer/pgbouncer.go
+++ b/modules/pgbouncer/pgbouncer.go
@@ -5,6 +5,7 @@ package pgbouncer
import (
"database/sql"
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -27,7 +28,7 @@ func init() {
func New() *PgBouncer {
return &PgBouncer{
Config: Config{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer",
},
charts: globalCharts.Copy(),
@@ -39,19 +40,20 @@ func New() *PgBouncer {
}
type Config struct {
- DSN string `yaml:"dsn"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type PgBouncer struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- db *sql.DB
- version *semver.Version
+ db *sql.DB
+ version *semver.Version
recheckSettingsTime time.Time
recheckSettingsEvery time.Duration
maxClientConn int64
@@ -59,18 +61,30 @@ type PgBouncer struct {
metrics *metrics
}
-func (p *PgBouncer) Init() bool {
+func (p *PgBouncer) Configuration() any {
+ return p.Config
+}
+
+func (p *PgBouncer) Init() error {
err := p.validateConfig()
if err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
- return true
+ return nil
}
-func (p *PgBouncer) Check() bool {
- return len(p.Collect()) > 0
+func (p *PgBouncer) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *PgBouncer) Charts() *module.Charts {
diff --git a/modules/pgbouncer/pgbouncer_test.go b/modules/pgbouncer/pgbouncer_test.go
index e1e0695dd..a510d0abb 100644
--- a/modules/pgbouncer/pgbouncer_test.go
+++ b/modules/pgbouncer/pgbouncer_test.go
@@ -12,33 +12,44 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- dataV170Version, _ = os.ReadFile("testdata/v1.7.0/version.txt")
- dataV1170Version, _ = os.ReadFile("testdata/v1.17.0/version.txt")
- dataV1170Config, _ = os.ReadFile("testdata/v1.17.0/config.txt")
- dataV1170Databases, _ = os.ReadFile("testdata/v1.17.0/databases.txt")
- dataV1170Pools, _ = os.ReadFile("testdata/v1.17.0/pools.txt")
- dataV1170Stats, _ = os.ReadFile("testdata/v1.17.0/stats.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer170Version, _ = os.ReadFile("testdata/v1.7.0/version.txt")
+ dataVer1170Version, _ = os.ReadFile("testdata/v1.17.0/version.txt")
+ dataVer1170Config, _ = os.ReadFile("testdata/v1.17.0/config.txt")
+ dataVer1170Databases, _ = os.ReadFile("testdata/v1.17.0/databases.txt")
+ dataVer1170Pools, _ = os.ReadFile("testdata/v1.17.0/pools.txt")
+ dataVer1170Stats, _ = os.ReadFile("testdata/v1.17.0/stats.txt")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataV170Version": dataV170Version,
- "dataV1170Version": dataV1170Version,
- "dataV1170Config": dataV1170Config,
- "dataV1170Databases": dataV1170Databases,
- "dataV1170Pools": dataV1170Pools,
- "dataV1170Stats": dataV1170Stats,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer170Version": dataVer170Version,
+ "dataVer1170Version": dataVer1170Version,
+ "dataVer1170Config": dataVer1170Config,
+ "dataVer1170Databases": dataVer1170Databases,
+ "dataVer1170Pools": dataVer1170Pools,
+ "dataVer1170Stats": dataVer1170Stats,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestPgBouncer_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PgBouncer{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestPgBouncer_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -60,9 +71,9 @@ func TestPgBouncer_Init(t *testing.T) {
p.Config = test.config
if test.wantFail {
- assert.False(t, p.Init())
+ assert.Error(t, p.Init())
} else {
- assert.True(t, p.Init())
+ assert.NoError(t, p.Init())
}
})
}
@@ -80,11 +91,11 @@ func TestPgBouncer_Check(t *testing.T) {
"Success when all queries are successful (v1.17.0)": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV1170Version)
- mockExpect(t, m, queryShowConfig, dataV1170Config)
- mockExpect(t, m, queryShowDatabases, dataV1170Databases)
- mockExpect(t, m, queryShowStats, dataV1170Stats)
- mockExpect(t, m, queryShowPools, dataV1170Pools)
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpect(t, m, queryShowConfig, dataVer1170Config)
+ mockExpect(t, m, queryShowDatabases, dataVer1170Databases)
+ mockExpect(t, m, queryShowStats, dataVer1170Stats)
+ mockExpect(t, m, queryShowPools, dataVer1170Pools)
},
},
"Fail when querying version returns an error": {
@@ -96,13 +107,13 @@ func TestPgBouncer_Check(t *testing.T) {
"Fail when querying version returns unsupported version": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV170Version)
+ mockExpect(t, m, queryShowVersion, dataVer170Version)
},
},
"Fail when querying config returns an error": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV1170Version)
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
mockExpectErr(m, queryShowConfig)
},
},
@@ -118,14 +129,14 @@ func TestPgBouncer_Check(t *testing.T) {
p.db = db
defer func() { _ = db.Close() }()
- require.True(t, p.Init())
+ require.NoError(t, p.Init())
test.prepareMock(t, mock)
if test.wantFail {
- assert.False(t, p.Check())
+ assert.Error(t, p.Check())
} else {
- assert.True(t, p.Check())
+ assert.NoError(t, p.Check())
}
assert.NoError(t, mock.ExpectationsWereMet())
})
@@ -141,11 +152,11 @@ func TestPgBouncer_Collect(t *testing.T) {
"Success on all queries (v1.17.0)": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV1170Version)
- mockExpect(t, m, queryShowConfig, dataV1170Config)
- mockExpect(t, m, queryShowDatabases, dataV1170Databases)
- mockExpect(t, m, queryShowStats, dataV1170Stats)
- mockExpect(t, m, queryShowPools, dataV1170Pools)
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
+ mockExpect(t, m, queryShowConfig, dataVer1170Config)
+ mockExpect(t, m, queryShowDatabases, dataVer1170Databases)
+ mockExpect(t, m, queryShowStats, dataVer1170Stats)
+ mockExpect(t, m, queryShowPools, dataVer1170Pools)
},
check: func(t *testing.T, p *PgBouncer) {
mx := p.Collect()
@@ -249,7 +260,7 @@ func TestPgBouncer_Collect(t *testing.T) {
"Fail when querying version returns unsupported version": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV170Version)
+ mockExpect(t, m, queryShowVersion, dataVer170Version)
},
check: func(t *testing.T, p *PgBouncer) {
mx := p.Collect()
@@ -261,7 +272,7 @@ func TestPgBouncer_Collect(t *testing.T) {
"Fail when querying config returns an error": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryShowVersion, dataV1170Version)
+ mockExpect(t, m, queryShowVersion, dataVer1170Version)
mockExpectErr(m, queryShowConfig)
},
check: func(t *testing.T, p *PgBouncer) {
@@ -283,7 +294,7 @@ func TestPgBouncer_Collect(t *testing.T) {
p.db = db
defer func() { _ = db.Close() }()
- require.True(t, p.Init())
+ require.NoError(t, p.Init())
for i, step := range test {
t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
diff --git a/modules/pgbouncer/testdata/config.json b/modules/pgbouncer/testdata/config.json
new file mode 100644
index 000000000..ed8b72dcb
--- /dev/null
+++ b/modules/pgbouncer/testdata/config.json
@@ -0,0 +1,5 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/pgbouncer/testdata/config.yaml b/modules/pgbouncer/testdata/config.yaml
new file mode 100644
index 000000000..caff49039
--- /dev/null
+++ b/modules/pgbouncer/testdata/config.yaml
@@ -0,0 +1,3 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
diff --git a/modules/phpdaemon/config_schema.json b/modules/phpdaemon/config_schema.json
index c200d437b..876f6c1dc 100644
--- a/modules/phpdaemon/config_schema.json
+++ b/modules/phpdaemon/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/phpdaemon job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "phpDaemon collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the phpDaemon status page.",
+ "type": "string",
+        "default": "http://127.0.0.1:8509/FullStatus"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/phpdaemon/init.go b/modules/phpdaemon/init.go
new file mode 100644
index 000000000..d96b23011
--- /dev/null
+++ b/modules/phpdaemon/init.go
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package phpdaemon
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
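+// validateConfig fails if 'url' is not set or an HTTP request cannot be built from the configuration.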
+func (p *PHPDaemon) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("url not set")
+ }
+ if _, err := web.NewHTTPRequest(p.Request); err != nil {
+ return err
+ }
+ return nil
+}
+
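+// initClient creates the API client that wraps the configured HTTP client.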
+func (p *PHPDaemon) initClient() (*client, error) {
+ httpClient, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, err
+ }
+ return newAPIClient(httpClient, p.Request), nil
+}
diff --git a/modules/phpdaemon/phpdaemon.go b/modules/phpdaemon/phpdaemon.go
index 506892cfe..708be337c 100644
--- a/modules/phpdaemon/phpdaemon.go
+++ b/modules/phpdaemon/phpdaemon.go
@@ -4,11 +4,11 @@ package phpdaemon
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -21,88 +21,80 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1:8509/FullStatus"
- defaultHTTPTimeout = time.Second * 2
-)
-
-// New creates PHPDaemon with default values.
func New() *PHPDaemon {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &PHPDaemon{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8509/FullStatus",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
-
- return &PHPDaemon{
- Config: config,
charts: charts.Copy(),
}
}
-// Config is the PHPDaemon module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
-// PHPDaemon PHPDaemon module.
type PHPDaemon struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- client *client
charts *Charts
+
+ client *client
}
-// Cleanup makes cleanup.
-func (PHPDaemon) Cleanup() {}
+func (p *PHPDaemon) Configuration() any {
+ return p.Config
+}
-// Init makes initialization.
-func (p *PHPDaemon) Init() bool {
- httpClient, err := web.NewHTTPClient(p.Client)
- if err != nil {
- p.Errorf("error on creating http client : %v", err)
- return false
+func (p *PHPDaemon) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Error(err)
+ return err
}
- _, err = web.NewHTTPRequest(p.Request)
+ c, err := p.initClient()
if err != nil {
- p.Errorf("error on creating http request to %s : %v", p.URL, err)
- return false
+ p.Error(err)
+ return err
}
-
- p.client = newAPIClient(httpClient, p.Request)
+ p.client = c
p.Debugf("using URL %s", p.URL)
- p.Debugf("using timeout: %s", p.Timeout.Duration)
+ p.Debugf("using timeout: %s", p.Timeout)
- return true
+ return nil
}
-// Check makes check.
-func (p *PHPDaemon) Check() bool {
- mx := p.Collect()
-
+func (p *PHPDaemon) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
if len(mx) == 0 {
- return false
+ return errors.New("no metrics collected")
}
+
if _, ok := mx["uptime"]; ok {
- // TODO: remove panic
- panicIf(p.charts.Add(uptimeChart.Copy()))
+ _ = p.charts.Add(uptimeChart.Copy())
}
- return true
+ return nil
}
-// Charts creates Charts.
-func (p PHPDaemon) Charts() *Charts { return p.charts }
+func (p *PHPDaemon) Charts() *Charts {
+ return p.charts
+}
-// Collect collects metrics.
func (p *PHPDaemon) Collect() map[string]int64 {
mx, err := p.collect()
@@ -114,9 +106,8 @@ func (p *PHPDaemon) Collect() map[string]int64 {
return mx
}
-func panicIf(err error) {
- if err == nil {
- return
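+// Cleanup closes any idle connections held by the module's HTTP client.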
+func (p *PHPDaemon) Cleanup() {
+ if p.client != nil && p.client.httpClient != nil {
+ p.client.httpClient.CloseIdleConnections()
}
- panic(err)
}
diff --git a/modules/phpdaemon/phpdaemon_test.go b/modules/phpdaemon/phpdaemon_test.go
index 0634e6ec4..af3559734 100644
--- a/modules/phpdaemon/phpdaemon_test.go
+++ b/modules/phpdaemon/phpdaemon_test.go
@@ -9,32 +9,36 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-const (
- testURL = "http://127.0.0.1:38001"
-)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-var testFullStatusData, _ = os.ReadFile("testdata/fullstatus.json")
+ dataFullStatusMetrics, _ = os.ReadFile("testdata/fullstatus.json")
+)
-func Test_testData(t *testing.T) {
- assert.NotEmpty(t, testFullStatusData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataFullStatusMetrics": dataFullStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- job := New()
-
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func TestPHPDaemon_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PHPDaemon{}, dataConfigJSON, dataConfigYAML)
}
func TestPHPDaemon_Init(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.NotNil(t, job.client)
}
@@ -42,21 +46,21 @@ func TestPHPDaemon_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testFullStatusData)
+ _, _ = w.Write(dataFullStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestPHPDaemon_CheckNG(t *testing.T) {
job := New()
- job.URL = testURL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ job.URL = "http://127.0.0.1:38001"
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestPHPDaemon_Charts(t *testing.T) {
@@ -68,13 +72,13 @@ func TestPHPDaemon_Charts(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testFullStatusData)
+ _, _ = w.Write(dataFullStatusMetrics)
}))
defer ts.Close()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
assert.True(t, job.charts.Has(uptimeChart.ID))
}
@@ -86,14 +90,14 @@ func TestPHPDaemon_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testFullStatusData)
+ _, _ = w.Write(dataFullStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
expected := map[string]int64{
"alive": 350,
@@ -121,8 +125,8 @@ func TestPHPDaemon_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestPHPDaemon_404(t *testing.T) {
@@ -135,6 +139,6 @@ func TestPHPDaemon_404(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/phpdaemon/testdata/config.json b/modules/phpdaemon/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/phpdaemon/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/phpdaemon/testdata/config.yaml b/modules/phpdaemon/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/phpdaemon/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/phpfpm/config_schema.json b/modules/phpfpm/config_schema.json
index a6b0140f3..60cc0ae52 100644
--- a/modules/phpfpm/config_schema.json
+++ b/modules/phpfpm/config_schema.json
@@ -1,84 +1,91 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/phpfpm job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "socket": {
- "type": "string"
- },
- "address": {
- "type": "string"
- },
- "fcgi_path": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "go.d/phpfpm job configuration schema.",
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "url": {
+ "type": "string"
+ },
+ "socket": {
+ "type": "string"
+ },
+ "address": {
+ "type": "string"
+ },
+ "fcgi_path": {
+ "type": "string"
+ },
+ "timeout": {
+ "type": [
+ "string",
+ "integer"
+ ]
+ },
+ "username": {
"type": "string"
+ },
+ "password": {
+ "type": "string"
+ },
+ "proxy_url": {
+ "type": "string"
+ },
+ "proxy_username": {
+ "type": "string"
+ },
+ "proxy_password": {
+ "type": "string"
+ },
+ "headers": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "not_follow_redirects": {
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "type": "string"
+ },
+ "tls_cert": {
+ "type": "string"
+ },
+ "tls_key": {
+ "type": "string"
+ },
+ "insecure_skip_verify": {
+ "type": "boolean"
}
},
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
- }
+ "oneOf": [
+ {
+ "required": [
+ "name",
+ "url"
+ ]
+ },
+ {
+ "required": [
+ "name",
+ "socket"
+ ]
+ },
+ {
+ "required": [
+ "name",
+ "address"
+ ]
+ }
+ ]
},
- "oneOf": [
- {
- "required": [
- "name",
- "url"
- ]
- },
- {
- "required": [
- "name",
- "socket"
- ]
- },
- {
- "required": [
- "name",
- "address"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
}
- ]
+ }
}
diff --git a/modules/phpfpm/init.go b/modules/phpfpm/init.go
index 0e764cbe0..5a6694634 100644
--- a/modules/phpfpm/init.go
+++ b/modules/phpfpm/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (p Phpfpm) initClient() (client, error) {
+func (p *Phpfpm) initClient() (client, error) {
if p.Socket != "" {
return p.initSocketClient()
}
@@ -20,32 +20,38 @@ func (p Phpfpm) initClient() (client, error) {
if p.URL != "" {
return p.initHTTPClient()
}
+
return nil, errors.New("neither 'socket' nor 'url' set")
}
-func (p Phpfpm) initHTTPClient() (*httpClient, error) {
+func (p *Phpfpm) initHTTPClient() (*httpClient, error) {
c, err := web.NewHTTPClient(p.Client)
if err != nil {
return nil, fmt.Errorf("create HTTP client: %v", err)
}
+
p.Debugf("using HTTP client, URL: %s", p.URL)
- p.Debugf("using timeout: %s", p.Timeout.Duration)
+ p.Debugf("using timeout: %s", p.Timeout)
+
return newHTTPClient(c, p.Request)
}
-func (p Phpfpm) initSocketClient() (*socketClient, error) {
+func (p *Phpfpm) initSocketClient() (*socketClient, error) {
if _, err := os.Stat(p.Socket); err != nil {
return nil, fmt.Errorf("the socket '%s' does not exist: %v", p.Socket, err)
}
+
p.Debugf("using socket client: %s", p.Socket)
- p.Debugf("using timeout: %s", p.Timeout.Duration)
+ p.Debugf("using timeout: %s", p.Timeout)
p.Debugf("using fcgi path: %s", p.FcgiPath)
- return newSocketClient(p.Socket, p.Timeout.Duration, p.FcgiPath), nil
+
+ return newSocketClient(p.Socket, p.Timeout.Duration(), p.FcgiPath), nil
}
-func (p Phpfpm) initTcpClient() (*tcpClient, error) {
+func (p *Phpfpm) initTcpClient() (*tcpClient, error) {
p.Debugf("using tcp client: %s", p.Address)
- p.Debugf("using timeout: %s", p.Timeout.Duration)
+ p.Debugf("using timeout: %s", p.Timeout)
p.Debugf("using fcgi path: %s", p.FcgiPath)
- return newTcpClient(p.Address, p.Timeout.Duration, p.FcgiPath), nil
+
+ return newTcpClient(p.Address, p.Timeout.Duration(), p.FcgiPath), nil
}
diff --git a/modules/phpfpm/phpfpm.go b/modules/phpfpm/phpfpm.go
index a61827929..d56455287 100644
--- a/modules/phpfpm/phpfpm.go
+++ b/modules/phpfpm/phpfpm.go
@@ -4,11 +4,11 @@ package phpfpm
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -29,7 +29,7 @@ func New() *Phpfpm {
URL: "http://127.0.0.1/status?full&json",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
FcgiPath: "/status",
@@ -37,36 +37,49 @@ func New() *Phpfpm {
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- Socket string `yaml:"socket"`
- Address string `yaml:"address"`
- FcgiPath string `yaml:"fcgi_path"`
- }
- Phpfpm struct {
- module.Base
- Config `yaml:",inline"`
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Socket string `yaml:"socket" json:"socket"`
+ Address string `yaml:"address" json:"address"`
+ FcgiPath string `yaml:"fcgi_path" json:"fcgi_path"`
+}
- client client
- }
-)
+type Phpfpm struct {
+ module.Base
+ Config `yaml:",inline" json:""`
+
+ client client
+}
-func (p *Phpfpm) Init() bool {
+func (p *Phpfpm) Configuration() any {
+ return p.Config
+}
+
+func (p *Phpfpm) Init() error {
c, err := p.initClient()
if err != nil {
p.Errorf("init client: %v", err)
- return false
+ return err
}
p.client = c
- return true
+
+ return nil
}
-func (p *Phpfpm) Check() bool {
- return len(p.Collect()) > 0
+func (p *Phpfpm) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-func (Phpfpm) Charts() *Charts {
+func (p *Phpfpm) Charts() *Charts {
return charts.Copy()
}
@@ -82,4 +95,4 @@ func (p *Phpfpm) Collect() map[string]int64 {
return mx
}
-func (Phpfpm) Cleanup() {}
+func (p *Phpfpm) Cleanup() {}
diff --git a/modules/phpfpm/phpfpm_test.go b/modules/phpfpm/phpfpm_test.go
index 5b9ecd236..210e00ad5 100644
--- a/modules/phpfpm/phpfpm_test.go
+++ b/modules/phpfpm/phpfpm_test.go
@@ -9,38 +9,44 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testStatusJSON, _ = os.ReadFile("testdata/status.json")
- testStatusFullJSON, _ = os.ReadFile("testdata/status-full.json")
- testStatusFullNoIdleJSON, _ = os.ReadFile("testdata/status-full-no-idle.json")
- testStatusText, _ = os.ReadFile("testdata/status.txt")
- testStatusFullText, _ = os.ReadFile("testdata/status-full.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusJSON, _ = os.ReadFile("testdata/status.json")
+ dataStatusFullJSON, _ = os.ReadFile("testdata/status-full.json")
+ dataStatusFullNoIdleJSON, _ = os.ReadFile("testdata/status-full-no-idle.json")
+ dataStatusText, _ = os.ReadFile("testdata/status.txt")
+ dataStatusFullText, _ = os.ReadFile("testdata/status-full.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, testStatusJSON)
- assert.NotNil(t, testStatusFullJSON)
- assert.NotNil(t, testStatusFullNoIdleJSON)
- assert.NotNil(t, testStatusText)
- assert.NotNil(t, testStatusFullText)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusJSON": dataStatusJSON,
+ "dataStatusFullJSON": dataStatusFullJSON,
+ "dataStatusFullNoIdleJSON": dataStatusFullNoIdleJSON,
+ "dataStatusText": dataStatusText,
+ "dataStatusFullText": dataStatusFullText,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- job := New()
-
- assert.Implements(t, (*module.Module)(nil), job)
+func TestPhpfpm_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Phpfpm{}, dataConfigJSON, dataConfigYAML)
}
func TestPhpfpm_Init(t *testing.T) {
job := New()
- got := job.Init()
-
- require.True(t, got)
+ require.NoError(t, job.Init())
assert.NotNil(t, job.client)
}
@@ -48,49 +54,42 @@ func TestPhpfpm_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusText)
+ _, _ = w.Write(dataStatusText)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- job.Init()
- require.True(t, job.Init())
-
- got := job.Check()
+ require.NoError(t, job.Init())
- assert.True(t, got)
+ assert.NoError(t, job.Check())
}
func TestPhpfpm_CheckReturnsFalseOnFailure(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/us"
- require.True(t, job.Init())
-
- got := job.Check()
+ require.NoError(t, job.Init())
- assert.False(t, got)
+ assert.Error(t, job.Check())
}
func TestPhpfpm_Charts(t *testing.T) {
job := New()
- got := job.Charts()
-
- assert.NotNil(t, got)
+ assert.NotNil(t, job.Charts())
}
func TestPhpfpm_CollectJSON(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusJSON)
+ _, _ = w.Write(dataStatusJSON)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/?json"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
got := job.Collect()
@@ -109,13 +108,13 @@ func TestPhpfpm_CollectJSONFull(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusFullJSON)
+ _, _ = w.Write(dataStatusFullJSON)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/?json"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
got := job.Collect()
@@ -143,13 +142,13 @@ func TestPhpfpm_CollectNoIdleProcessesJSONFull(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusFullNoIdleJSON)
+ _, _ = w.Write(dataStatusFullNoIdleJSON)
}))
defer ts.Close()
job := New()
job.URL = ts.URL + "/?json"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
got := job.Collect()
@@ -168,13 +167,13 @@ func TestPhpfpm_CollectText(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusText)
+ _, _ = w.Write(dataStatusText)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
got := job.Collect()
@@ -193,13 +192,13 @@ func TestPhpfpm_CollectTextFull(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusFullText)
+ _, _ = w.Write(dataStatusFullText)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
got := job.Collect()
@@ -233,11 +232,9 @@ func TestPhpfpm_CollectReturnsNothingWhenInvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
-
- got := job.Collect()
+ require.NoError(t, job.Init())
- assert.Len(t, got, 0)
+ assert.Len(t, job.Collect(), 0)
}
func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) {
@@ -250,11 +247,9 @@ func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
- got := job.Collect()
-
- assert.Len(t, got, 0)
+ assert.Len(t, job.Collect(), 0)
}
func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) {
@@ -267,11 +262,9 @@ func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
-
- got := job.Collect()
+ require.NoError(t, job.Init())
- assert.Len(t, got, 0)
+ assert.Len(t, job.Collect(), 0)
}
func TestPhpfpm_Cleanup(t *testing.T) {
diff --git a/modules/phpfpm/testdata/config.json b/modules/phpfpm/testdata/config.json
new file mode 100644
index 000000000..458343f74
--- /dev/null
+++ b/modules/phpfpm/testdata/config.json
@@ -0,0 +1,23 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "socket": "ok",
+ "address": "ok",
+ "fcgi_path": "ok"
+}
diff --git a/modules/phpfpm/testdata/config.yaml b/modules/phpfpm/testdata/config.yaml
new file mode 100644
index 000000000..6c7bea094
--- /dev/null
+++ b/modules/phpfpm/testdata/config.yaml
@@ -0,0 +1,20 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+socket: "ok"
+address: "ok"
+fcgi_path: "ok"
diff --git a/modules/pihole/config_schema.json b/modules/pihole/config_schema.json
index e4c13fa10..ac00f6818 100644
--- a/modules/pihole/config_schema.json
+++ b/modules/pihole/config_schema.json
@@ -1,62 +1,159 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/pihole job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Pi-hole collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the Pi-hole instance.",
+ "type": "string",
+ "default": "http://127.0.0.1"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "setup_vars_path": {
+ "title": "Path to setupVars.conf",
+ "description": "This file is used to get the web password.",
+ "type": "string",
+ "default": "/etc/pihole/setupVars.conf"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "setup_vars_path"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "setup_vars_path": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/pihole/pihole.go b/modules/pihole/pihole.go
index 6aba5cad0..58f9a4091 100644
--- a/modules/pihole/pihole.go
+++ b/modules/pihole/pihole.go
@@ -4,13 +4,13 @@ package pihole
import (
_ "embed"
+ "errors"
"net/http"
"sync"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -34,7 +34,8 @@ func New() *Pihole {
URL: "http://127.0.0.1",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5}},
+ Timeout: web.Duration(time.Second * 5),
+ },
},
SetupVarsPath: "/etc/pihole/setupVars.conf",
},
@@ -46,32 +47,38 @@ func New() *Pihole {
}
type Config struct {
- web.HTTP `yaml:",inline"`
- SetupVarsPath string `yaml:"setup_vars_path"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ SetupVarsPath string `yaml:"setup_vars_path" json:"setup_vars_path"`
}
type Pihole struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
addQueriesTypesOnce *sync.Once
addFwsDestinationsOnce *sync.Once
- httpClient *http.Client
+ httpClient *http.Client
+
checkVersion bool
}
-func (p *Pihole) Init() bool {
+func (p *Pihole) Configuration() any {
+ return p.Config
+}
+
+func (p *Pihole) Init() error {
if err := p.validateConfig(); err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
httpClient, err := p.initHTTPClient()
if err != nil {
p.Errorf("init http client: %v", err)
- return false
+ return err
}
p.httpClient = httpClient
@@ -82,11 +89,19 @@ func (p *Pihole) Init() bool {
p.Debugf("web password: %s", p.Password)
}
- return true
+ return nil
}
-func (p *Pihole) Check() bool {
- return len(p.Collect()) > 0
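+// Check runs a collection cycle and fails if it returns an error or no metrics.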
+func (p *Pihole) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *Pihole) Charts() *module.Charts {
diff --git a/modules/pihole/pihole_test.go b/modules/pihole/pihole_test.go
index 08ad244a7..d5326365c 100644
--- a/modules/pihole/pihole_test.go
+++ b/modules/pihole/pihole_test.go
@@ -9,6 +9,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
"github.com/stretchr/testify/assert"
@@ -21,12 +22,32 @@ const (
)
var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
dataEmptyResp = []byte("[]")
dataSummaryRawResp, _ = os.ReadFile("testdata/summaryRaw.json")
dataGetQueryTypesResp, _ = os.ReadFile("testdata/getQueryTypes.json")
dataGetForwardDestinationsResp, _ = os.ReadFile("testdata/getForwardDestinations.json")
)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataEmptyResp": dataEmptyResp,
+ "dataSummaryRawResp": dataSummaryRawResp,
+ "dataGetQueryTypesResp": dataGetQueryTypesResp,
+ "dataGetForwardDestinationsResp": dataGetForwardDestinationsResp,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPihole_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pihole{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestPihole_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -52,9 +73,9 @@ func TestPihole_Init(t *testing.T) {
p.Config = test.config
if test.wantFail {
- assert.False(t, p.Init())
+ assert.Error(t, p.Init())
} else {
- assert.True(t, p.Init())
+ assert.NoError(t, p.Init())
}
})
}
@@ -85,9 +106,9 @@ func TestPihole_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, p.Check())
+ assert.Error(t, p.Check())
} else {
- assert.True(t, p.Check())
+ assert.NoError(t, p.Check())
}
})
}
@@ -164,7 +185,7 @@ func caseSuccessWithWebPassword(t *testing.T) (*Pihole, func()) {
p.SetupVarsPath = pathSetupVarsOK
p.URL = srv.URL
- require.True(t, p.Init())
+ require.NoError(t, p.Init())
return p, srv.Close
}
@@ -175,7 +196,7 @@ func caseFailNoWebPassword(t *testing.T) (*Pihole, func()) {
p.SetupVarsPath = pathSetupVarsWrong
p.URL = srv.URL
- require.True(t, p.Init())
+ require.NoError(t, p.Init())
return p, srv.Close
}
@@ -186,7 +207,7 @@ func caseFailUnsupportedVersion(t *testing.T) (*Pihole, func()) {
p.SetupVarsPath = pathSetupVarsOK
p.URL = srv.URL
- require.True(t, p.Init())
+ require.NoError(t, p.Init())
return p, srv.Close
}
diff --git a/modules/pihole/testdata/config.json b/modules/pihole/testdata/config.json
new file mode 100644
index 000000000..2d82443b0
--- /dev/null
+++ b/modules/pihole/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "setup_vars_path": "ok"
+}
diff --git a/modules/pihole/testdata/config.yaml b/modules/pihole/testdata/config.yaml
new file mode 100644
index 000000000..a9361246a
--- /dev/null
+++ b/modules/pihole/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+setup_vars_path: "ok"
diff --git a/modules/pika/config_schema.json b/modules/pika/config_schema.json
index d284faaa1..17c2e92df 100644
--- a/modules/pika/config_schema.json
+++ b/modules/pika/config_schema.json
@@ -1,35 +1,82 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "title": "go.d/pika job configuration schema.",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Pika collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Pika URL",
+ "description": "The URL specifying the connection details for the Pika server.",
+ "type": "string",
+ "default": "redis://@localhost:9221"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection, read, and write timeout duration in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
- },
- "tls_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/pika/init.go b/modules/pika/init.go
index 2ad3ae8ec..d52cb23b7 100644
--- a/modules/pika/init.go
+++ b/modules/pika/init.go
@@ -11,14 +11,14 @@ import (
"github.com/go-redis/redis/v8"
)
-func (p Pika) validateConfig() error {
+func (p *Pika) validateConfig() error {
if p.Address == "" {
return errors.New("'address' not set")
}
return nil
}
-func (p Pika) initRedisClient() (*redis.Client, error) {
+func (p *Pika) initRedisClient() (*redis.Client, error) {
opts, err := redis.ParseURL(p.Address)
if err != nil {
return nil, err
@@ -35,13 +35,13 @@ func (p Pika) initRedisClient() (*redis.Client, error) {
opts.PoolSize = 1
opts.TLSConfig = tlsConfig
- opts.DialTimeout = p.Timeout.Duration
- opts.ReadTimeout = p.Timeout.Duration
- opts.WriteTimeout = p.Timeout.Duration
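+	// apply the configured timeout to connect, read, and write operations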
+ opts.DialTimeout = p.Timeout.Duration()
+ opts.ReadTimeout = p.Timeout.Duration()
+ opts.WriteTimeout = p.Timeout.Duration()
return redis.NewClient(opts), nil
}
-func (p Pika) initCharts() (*module.Charts, error) {
+func (p *Pika) initCharts() (*module.Charts, error) {
return pikaCharts.Copy(), nil
}
diff --git a/modules/pika/pika.go b/modules/pika/pika.go
index a14a44113..4cd2b3342 100644
--- a/modules/pika/pika.go
+++ b/modules/pika/pika.go
@@ -5,6 +5,7 @@ package pika
import (
"context"
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -29,7 +30,7 @@ func New() *Pika {
return &Pika{
Config: Config{
Address: "redis://@localhost:9221",
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
collectedCommands: make(map[string]bool),
@@ -38,25 +39,25 @@ func New() *Pika {
}
type Config struct {
- Address string `yaml:"address"`
- Timeout web.Duration `yaml:"timeout"`
- tlscfg.TLSConfig `yaml:",inline"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type (
Pika struct {
module.Base
- Config `yaml:",inline"`
+		Config `yaml:",inline" json:""`
- pdb redisClient
+ charts *module.Charts
- server string
- version *semver.Version
+ pdb redisClient
+ server string
+ version *semver.Version
collectedCommands map[string]bool
collectedDbs map[string]bool
-
- charts *module.Charts
}
redisClient interface {
Info(ctx context.Context, section ...string) *redis.StringCmd
@@ -64,32 +65,44 @@ type (
}
)
-func (p *Pika) Init() bool {
+func (p *Pika) Configuration() any {
+ return p.Config
+}
+
+func (p *Pika) Init() error {
err := p.validateConfig()
if err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
pdb, err := p.initRedisClient()
if err != nil {
p.Errorf("init redis client: %v", err)
- return false
+ return err
}
p.pdb = pdb
charts, err := p.initCharts()
if err != nil {
p.Errorf("init charts: %v", err)
- return false
+ return err
}
p.charts = charts
- return true
+ return nil
}
-func (p *Pika) Check() bool {
- return len(p.Collect()) > 0
+func (p *Pika) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *Pika) Charts() *module.Charts {
diff --git a/modules/pika/pika_test.go b/modules/pika/pika_test.go
index a564a54ce..4d987fefa 100644
--- a/modules/pika/pika_test.go
+++ b/modules/pika/pika_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/go-redis/redis/v8"
@@ -16,21 +17,26 @@ import (
)
var (
- redisInfoAll, _ = os.ReadFile("testdata/redis/info_all.txt")
- v340InfoAll, _ = os.ReadFile("testdata/v3.4.0/info_all.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataRedisInfoAll, _ = os.ReadFile("testdata/redis/info_all.txt")
+ dataVer340InfoAll, _ = os.ReadFile("testdata/v3.4.0/info_all.txt")
)
-func Test_Testdata(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "redisInfoAll": redisInfoAll,
- "v340InfoAll": v340InfoAll,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataRedisInfoAll": dataRedisInfoAll,
+ "dataVer340InfoAll": dataVer340InfoAll,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Pika)(nil), New())
+func TestPika_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pika{}, dataConfigJSON, dataConfigYAML)
}
func TestPika_Init(t *testing.T) {
@@ -64,9 +70,9 @@ func TestPika_Init(t *testing.T) {
pika.Config = test.config
if test.wantFail {
- assert.False(t, pika.Init())
+ assert.Error(t, pika.Init())
} else {
- assert.True(t, pika.Init())
+ assert.NoError(t, pika.Init())
}
})
}
@@ -95,9 +101,9 @@ func TestPika_Check(t *testing.T) {
pika := test.prepare(t)
if test.wantFail {
- assert.False(t, pika.Check())
+ assert.Error(t, pika.Check())
} else {
- assert.True(t, pika.Check())
+ assert.NoError(t, pika.Check())
}
})
}
@@ -105,7 +111,7 @@ func TestPika_Check(t *testing.T) {
func TestPika_Charts(t *testing.T) {
pika := New()
- require.True(t, pika.Init())
+ require.NoError(t, pika.Init())
assert.NotNil(t, pika.Charts())
}
@@ -114,7 +120,7 @@ func TestPika_Cleanup(t *testing.T) {
pika := New()
assert.NotPanics(t, pika.Cleanup)
- require.True(t, pika.Init())
+ require.NoError(t, pika.Init())
m := &mockRedisClient{}
pika.pdb = m
@@ -195,16 +201,16 @@ func TestPika_Collect(t *testing.T) {
func preparePikaV340(t *testing.T) *Pika {
pika := New()
- require.True(t, pika.Init())
+ require.NoError(t, pika.Init())
pika.pdb = &mockRedisClient{
- result: v340InfoAll,
+ result: dataVer340InfoAll,
}
return pika
}
func preparePikaErrorOnInfo(t *testing.T) *Pika {
pika := New()
- require.True(t, pika.Init())
+ require.NoError(t, pika.Init())
pika.pdb = &mockRedisClient{
errOnInfo: true,
}
@@ -213,9 +219,9 @@ func preparePikaErrorOnInfo(t *testing.T) *Pika {
func preparePikaWithRedisMetrics(t *testing.T) *Pika {
pika := New()
- require.True(t, pika.Init())
+ require.NoError(t, pika.Init())
pika.pdb = &mockRedisClient{
- result: redisInfoAll,
+ result: dataRedisInfoAll,
}
return pika
}
diff --git a/modules/pika/testdata/config.json b/modules/pika/testdata/config.json
new file mode 100644
index 000000000..d8ba812ab
--- /dev/null
+++ b/modules/pika/testdata/config.json
@@ -0,0 +1,9 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/pika/testdata/config.yaml b/modules/pika/testdata/config.yaml
new file mode 100644
index 000000000..6a6f6ae69
--- /dev/null
+++ b/modules/pika/testdata/config.yaml
@@ -0,0 +1,7 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/ping/config_schema.json b/modules/ping/config_schema.json
index fe3779bf4..50b6b53a2 100644
--- a/modules/ping/config_schema.json
+++ b/modules/ping/config_schema.json
@@ -1,47 +1,69 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "title": "go.d/ping job configuration schema.",
- "properties": {
- "name": {
- "type": "string"
- },
- "update_every": {
- "type": "integer",
- "minimum": 1
- },
- "hosts": {
- "type": "array",
- "items": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Ping collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which a set number of ping packets (specified by 'packets') are sent to the specified hosts.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
},
- "minItems": 1
- },
- "network": {
- "type": "string",
- "enum": [
- "ip",
- "ip4",
- "ip6"
- ]
- },
- "privileged": {
- "type": "boolean"
+ "privileged": {
+ "title": "Privileged mode",
+ "description": "Determines the type of ping packets. If unset, sends unprivileged UDP ping packets; if set, sends raw ICMP ping packets (requires elevated privileges).",
+ "type": "boolean",
+ "default": false
+ },
+ "hosts": {
+ "title": "Network hosts",
+ "description": "List of network hosts (IP addresses or domain names) to send ping packets.",
+ "type": "array",
+ "items": {
+ "title": "Host",
+ "type": "string"
+ },
+ "minItems": 1,
+ "uniqueItems": true
+ },
+ "network": {
+ "title": "Network",
+        "description": "Determines the type of network resolution to use for the specified hosts. Choose one of the following options: 'ip' (selects IPv4 or IPv6 based on system configuration), 'ip4' (forces resolution to IPv4 addresses), 'ip6' (forces resolution to IPv6 addresses).",
+ "type": "string",
+ "default": "ip",
+ "enum": [
+ "ip",
+ "ip4",
+ "ip6"
+ ]
+ },
+ "packets": {
+ "title": "Packets",
+ "description": "Number of ping packets to send for each host.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "interval": {
+ "title": "Interval",
+        "description": "The interval between sending ping packets, in seconds.",
+ "type": "number",
+ "minimum": 0.1,
+ "default": 0.1
+ }
},
- "sendPackets": {
- "type": "integer",
- "minimum": 1
+ "required": [
+ "hosts"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"interval": {
- "type": "integer",
- "minimum": 1
- },
- "interface": {
- "type": "string"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "hosts"
- ]
+ }
}
diff --git a/modules/ping/init.go b/modules/ping/init.go
index e71aa6c75..62d78c8e6 100644
--- a/modules/ping/init.go
+++ b/modules/ping/init.go
@@ -31,7 +31,7 @@ func (p *Ping) initProber() (prober, error) {
privileged: p.Privileged,
packets: p.SendPackets,
iface: p.Interface,
- interval: p.Interval.Duration,
+ interval: p.Interval.Duration(),
deadline: deadline,
}
diff --git a/modules/ping/ping.go b/modules/ping/ping.go
index 7aa402985..c36833f68 100644
--- a/modules/ping/ping.go
+++ b/modules/ping/ping.go
@@ -4,6 +4,7 @@ package ping
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -32,7 +33,7 @@ func New() *Ping {
Network: "ip",
Privileged: true,
SendPackets: 5,
- Interval: web.Duration{Duration: time.Millisecond * 100},
+ Interval: web.Duration(time.Millisecond * 100),
},
charts: &module.Charts{},
@@ -42,51 +43,63 @@ func New() *Ping {
}
type Config struct {
- UpdateEvery int `yaml:"update_every"`
- Hosts []string `yaml:"hosts"`
- Network string `yaml:"network"`
- Privileged bool `yaml:"privileged"`
- SendPackets int `yaml:"packets"`
- Interval web.Duration `yaml:"interval"`
- Interface string `yaml:"interface"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Hosts []string `yaml:"hosts" json:"hosts"`
+ Network string `yaml:"network" json:"network"`
+ Privileged bool `yaml:"privileged" json:"privileged"`
+ SendPackets int `yaml:"packets" json:"packets"`
+ Interval web.Duration `yaml:"interval" json:"interval"`
+ Interface string `yaml:"interface" json:"interface"`
}
type (
Ping struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- hosts map[string]bool
-
- newProber func(pingProberConfig, *logger.Logger) prober
prober prober
+ newProber func(pingProberConfig, *logger.Logger) prober
+
+ hosts map[string]bool
}
prober interface {
ping(host string) (*probing.Statistics, error)
}
)
-func (p *Ping) Init() bool {
+func (p *Ping) Configuration() any {
+ return p.Config
+}
+
+func (p *Ping) Init() error {
err := p.validateConfig()
if err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
pr, err := p.initProber()
if err != nil {
p.Errorf("init prober: %v", err)
- return false
+ return err
}
p.prober = pr
- return true
+ return nil
}
-func (p *Ping) Check() bool {
- return len(p.Collect()) > 0
+func (p *Ping) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *Ping) Charts() *module.Charts {
diff --git a/modules/ping/ping_test.go b/modules/ping/ping_test.go
index 57958d557..7ad467ef6 100644
--- a/modules/ping/ping_test.go
+++ b/modules/ping/ping_test.go
@@ -4,9 +4,11 @@ package ping
import (
"errors"
+ "os"
"testing"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/logger"
probing "github.com/prometheus-community/pro-bing"
@@ -14,6 +16,24 @@ import (
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPing_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Ping{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestPing_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -39,9 +59,9 @@ func TestPing_Init(t *testing.T) {
ping.UpdateEvery = 1
if test.wantFail {
- assert.False(t, ping.Init())
+ assert.Error(t, ping.Init())
} else {
- assert.True(t, ping.Init())
+ assert.NoError(t, ping.Init())
}
})
}
@@ -75,9 +95,9 @@ func TestPing_Check(t *testing.T) {
ping := test.prepare(t)
if test.wantFail {
- assert.False(t, ping.Check())
+ assert.Error(t, ping.Check())
} else {
- assert.True(t, ping.Check())
+ assert.NoError(t, ping.Check())
}
})
}
@@ -145,7 +165,7 @@ func casePingSuccess(t *testing.T) *Ping {
ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober {
return &mockProber{}
}
- require.True(t, ping.Init())
+ require.NoError(t, ping.Init())
return ping
}
@@ -156,7 +176,7 @@ func casePingError(t *testing.T) *Ping {
ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober {
return &mockProber{errOnPing: true}
}
- require.True(t, ping.Init())
+ require.NoError(t, ping.Init())
return ping
}
diff --git a/modules/ping/testdata/config.json b/modules/ping/testdata/config.json
new file mode 100644
index 000000000..18df64529
--- /dev/null
+++ b/modules/ping/testdata/config.json
@@ -0,0 +1,11 @@
+{
+ "update_every": 123,
+ "hosts": [
+ "ok"
+ ],
+ "network": "ok",
+ "privileged": true,
+ "packets": 123,
+ "interval": 123.123,
+ "interface": "ok"
+}
diff --git a/modules/ping/testdata/config.yaml b/modules/ping/testdata/config.yaml
new file mode 100644
index 000000000..5eacb9413
--- /dev/null
+++ b/modules/ping/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+hosts:
+ - "ok"
+network: "ok"
+privileged: yes
+packets: 123
+interval: 123.123
+interface: "ok"
diff --git a/modules/portcheck/collect.go b/modules/portcheck/collect.go
index 723c105c3..dab45ec41 100644
--- a/modules/portcheck/collect.go
+++ b/modules/portcheck/collect.go
@@ -41,7 +41,7 @@ func (pc *PortCheck) collect() (map[string]int64, error) {
func (pc *PortCheck) checkPort(p *port) {
start := time.Now()
- conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration)
+ conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration())
dur := time.Since(start)
defer func() {
diff --git a/modules/portcheck/config_schema.json b/modules/portcheck/config_schema.json
index 8b9515702..316bc484a 100644
--- a/modules/portcheck/config_schema.json
+++ b/modules/portcheck/config_schema.json
@@ -1,37 +1,52 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/portcheck job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string",
- "minLength": 1
- },
- "host": {
- "type": "string",
- "minLength": 1
- },
- "ports": {
- "type": "array",
- "items": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Portcheck collector configuration.",
+ "description": "Collector for monitoring TCP service availability and response time.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which TCP connection status and response time data is collected.",
"type": "integer",
- "minimum": 1
+ "minimum": 1,
+ "default": 5
+ },
+ "host": {
+ "title": "Network host",
+ "description": "The IP address or domain name of the network host.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The TCP connection timeout duration in seconds. The timeout includes domain name resolution.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
},
- "minItems": 1
+ "ports": {
+ "title": "Ports",
+ "description": "A list of ports to monitor for TCP service availability and response time.",
+ "type": "array",
+ "items": {
+ "title": "Port",
+ "type": "integer",
+ "minimum": 1
+ },
+ "minItems": 1
+ }
+ },
+ "required": [
+ "host",
+ "ports"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ],
- "minLength": 1,
- "minimum": 1,
- "description": "The timeout duration, in seconds. Must be at least 1."
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "host",
- "ports"
- ]
+ }
}
diff --git a/modules/portcheck/init.go b/modules/portcheck/init.go
index d5c2ebb55..23825620b 100644
--- a/modules/portcheck/init.go
+++ b/modules/portcheck/init.go
@@ -4,10 +4,21 @@ package portcheck
import (
"errors"
+ "net"
+ "time"
"github.com/netdata/go.d.plugin/agent/module"
)
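+// dialFunc matches the signature of net.DialTimeout, letting tests substitute a stub dialer.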
+type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error)
+
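+// port holds the runtime state for a single monitored TCP port.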
+type port struct {
+ number int
+ state checkState
+ inState int
+ latency int
+}
+
func (pc *PortCheck) validateConfig() error {
if pc.Host == "" {
return errors.New("'host' parameter not set")
@@ -29,3 +40,10 @@ func (pc *PortCheck) initCharts() (*module.Charts, error) {
return &charts, nil
}
+
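+// initPorts builds the per-port state slice from the configured port numbers.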
+func (pc *PortCheck) initPorts() (ports []*port) {
+ for _, p := range pc.Ports {
+ ports = append(ports, &port{number: p})
+ }
+ return ports
+}
diff --git a/modules/portcheck/portcheck.go b/modules/portcheck/portcheck.go
index c7e2c0b9d..072313dee 100644
--- a/modules/portcheck/portcheck.go
+++ b/modules/portcheck/portcheck.go
@@ -27,63 +27,58 @@ func init() {
func New() *PortCheck {
return &PortCheck{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
},
dial: net.DialTimeout,
}
}
type Config struct {
- Host string `yaml:"host"`
- Ports []int `yaml:"ports"`
- Timeout web.Duration `yaml:"timeout"`
-}
-
-type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error)
-
-type port struct {
- number int
- state checkState
- inState int
- latency int
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Host string `yaml:"host" json:"host"`
+ Ports []int `yaml:"ports" json:"ports"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type PortCheck struct {
module.Base
- Config `yaml:",inline"`
- UpdateEvery int `yaml:"update_every"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- dial dialFunc
- ports []*port
+
+ dial dialFunc
+
+ ports []*port
+}
+
+func (pc *PortCheck) Configuration() any {
+ return pc.Config
}
-func (pc *PortCheck) Init() bool {
+func (pc *PortCheck) Init() error {
if err := pc.validateConfig(); err != nil {
pc.Errorf("config validation: %v", err)
- return false
+ return err
}
charts, err := pc.initCharts()
if err != nil {
pc.Errorf("init charts: %v", err)
- return false
+ return err
}
pc.charts = charts
- for _, p := range pc.Ports {
- pc.ports = append(pc.ports, &port{number: p})
- }
+ pc.ports = pc.initPorts()
pc.Debugf("using host: %s", pc.Host)
pc.Debugf("using ports: %v", pc.Ports)
pc.Debugf("using TCP connection timeout: %s", pc.Timeout)
- return true
+ return nil
}
-func (pc *PortCheck) Check() bool {
- return true
+func (pc *PortCheck) Check() error {
+ return nil
}
func (pc *PortCheck) Charts() *module.Charts {
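This file shows the lifecycle change applied to every module in the PR: `Init` and `Check` return `error` instead of `bool`, and a `Configuration() any` accessor is added. A sketch of the contract the modules now satisfy; the authoritative interface lives in agent/module and may differ in detail:

```go
package module

// Charts is a placeholder here for the real chart set in agent/module.
type Charts struct{}

// Module sketches the assumed post-change contract.
type Module interface {
	Init() error  // was Init() bool: the error now carries the failure reason
	Check() error // was Check() bool
	Charts() *Charts
	Collect() map[string]int64
	Cleanup()
	Configuration() any // new: exposes the job config for (de)serialization
}
```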
diff --git a/modules/portcheck/portcheck_test.go b/modules/portcheck/portcheck_test.go
index 2e242cbbb..62ddfae26 100644
--- a/modules/portcheck/portcheck_test.go
+++ b/modules/portcheck/portcheck_test.go
@@ -5,19 +5,33 @@ package portcheck
import (
"errors"
"net"
+ "os"
"strings"
"testing"
"time"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- job := New()
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.Implements(t, (*module.Module)(nil), job)
+func TestPortCheck_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &PortCheck{}, dataConfigJSON, dataConfigYAML)
}
func TestPortCheck_Init(t *testing.T) {
@@ -25,21 +39,21 @@ func TestPortCheck_Init(t *testing.T) {
job.Host = "127.0.0.1"
job.Ports = []int{39001, 39002}
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.Len(t, job.ports, 2)
}
func TestPortCheck_InitNG(t *testing.T) {
job := New()
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
job.Host = "127.0.0.1"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
job.Ports = []int{39001, 39002}
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
}
func TestPortCheck_Check(t *testing.T) {
- assert.True(t, New().Check())
+ assert.NoError(t, New().Check())
}
func TestPortCheck_Cleanup(t *testing.T) {
@@ -50,7 +64,7 @@ func TestPortCheck_Charts(t *testing.T) {
job := New()
job.Ports = []int{1, 2}
job.Host = "localhost"
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports))
}
@@ -61,8 +75,8 @@ func TestPortCheck_Collect(t *testing.T) {
job.Ports = []int{39001, 39002}
job.UpdateEvery = 5
job.dial = testDial(nil)
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
copyLatency := func(dst, src map[string]int64) {
for k := range dst {
diff --git a/modules/portcheck/testdata/config.json b/modules/portcheck/testdata/config.json
new file mode 100644
index 000000000..a69a6ac38
--- /dev/null
+++ b/modules/portcheck/testdata/config.json
@@ -0,0 +1,8 @@
+{
+ "update_every": 123,
+ "host": "ok",
+ "ports": [
+ 123
+ ],
+ "timeout": 123.123
+}
diff --git a/modules/portcheck/testdata/config.yaml b/modules/portcheck/testdata/config.yaml
new file mode 100644
index 000000000..72bdfd549
--- /dev/null
+++ b/modules/portcheck/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+host: "ok"
+ports:
+ - 123
+timeout: 123.123
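The two fixtures deliberately use distinctive sentinel values (123, "ok", 123.123) so that a field with a wrong or missing yaml/json struct tag cannot round-trip unnoticed. A hypothetical re-implementation of the check that module.TestConfigurationSerialize is assumed to perform (the real helper lives in agent/module):

```go
package portcheck

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// testConfigSerializeSketch is a hypothetical stand-in for
// module.TestConfigurationSerialize: load both fixtures, marshal back,
// and fail on any field whose tags do not round-trip. It assumes
// web.Duration (un)marshals the numeric form used by the fixtures.
func testConfigSerializeSketch(t *testing.T, wantJSON, wantYAML []byte) {
	pc := New()

	require.NoError(t, yaml.Unmarshal(wantYAML, &pc.Config))
	require.NoError(t, json.Unmarshal(wantJSON, &pc.Config))

	gotJSON, err := json.Marshal(pc.Config)
	require.NoError(t, err)
	require.JSONEq(t, string(wantJSON), string(gotJSON))
}
```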
diff --git a/modules/postgres/collect.go b/modules/postgres/collect.go
index f66e956a3..b43e2806e 100644
--- a/modules/postgres/collect.go
+++ b/modules/postgres/collect.go
@@ -132,7 +132,7 @@ func (p *Postgres) openPrimaryConnection() (*sql.DB, error) {
db.SetMaxIdleConns(1)
db.SetConnMaxLifetime(10 * time.Minute)
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
if err := db.PingContext(ctx); err != nil {
@@ -162,7 +162,7 @@ func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, erro
db.SetMaxIdleConns(1)
db.SetConnMaxLifetime(10 * time.Minute)
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
if err := db.PingContext(ctx); err != nil {
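The mechanical `.Timeout.Duration` → `.Timeout.Duration()` churn here (and the `web.Duration{Duration: ...}` → `web.Duration(...)` constructors elsewhere in the PR) follows from pkg/web redefining Duration as a named type over time.Duration instead of a struct wrapping one. A minimal sketch of the assumed new type, covering only the numeric form used by the testdata fixtures:

```go
package web

import (
	"encoding/json"
	"time"
)

// Duration sketches the assumed new definition: a defined type rather than
// a struct, so construction is a conversion and call sites gain parentheses.
type Duration time.Duration

func (d Duration) Duration() time.Duration { return time.Duration(d) }

// The fixtures store timeouts as plain seconds ("timeout": 123.123), so the
// unmarshaler presumably accepts numbers; the real one in pkg/web likely
// handles string forms too, which this sketch omits.
func (d *Duration) UnmarshalJSON(b []byte) error {
	var secs float64
	if err := json.Unmarshal(b, &secs); err != nil {
		return err
	}
	*d = Duration(secs * float64(time.Second))
	return nil
}
```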
diff --git a/modules/postgres/config_schema.json b/modules/postgres/config_schema.json
index 98a8616b7..81b3e1bbb 100644
--- a/modules/postgres/config_schema.json
+++ b/modules/postgres/config_schema.json
@@ -1,44 +1,128 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/postgres job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Postgres collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the Postgres server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "Postgres server Data Source Name (DSN) specifying the connection details, including authentication credentials.",
+ "type": "string",
+ "default": "postgres://netdata:password@127.0.0.1:5432/postgres"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for queries executed against the Postgres server.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "collect_databases_matching": {
+ "title": "Database selector",
+ "description": "Configuration for monitoring specific databases using Netdata simple patterns. For example, 'mydb*' will match databases starting with 'mydb'.",
+ "type": "string"
+ },
+ "max_db_tables": {
+ "title": "Database tables limit",
+ "description": "Table metrics will not be collected for databases that have more tables than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 50
+ },
+ "max_db_indexes": {
+ "title": "Database index limit",
+ "description": "Index metrics will not be collected for databases that have more indexes than the limit. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 250
+ },
+ "transaction_time_histogram": {
+ "title": "Transaction time histogram",
+ "description": "Buckets for transaction time histogram in milliseconds.",
+ "type": "array",
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ },
+ "query_time_histogram": {
+ "title": "Query time histogram",
+ "description": "Buckets for query time histogram in milliseconds.",
+ "type": "array",
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "uniqueItems": true,
+ "default": [
+ 0.1,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
+ }
},
- "dsn": {
- "type": "string"
+ "required": [
+ "dsn"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "dsn",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Database stats",
+ "fields": [
+ "max_db_tables",
+ "max_db_indexes",
+ "collect_databases_matching"
+ ]
+ },
+ {
+ "title": "Histograms",
+ "fields": [
+ "transaction_time_histogram",
+ "query_time_histogram"
+ ]
+ }
]
},
- "collect_databases_matching": {
- "type": "string"
- },
"transaction_time_histogram": {
- "type": "array",
- "items": {
- "type": "number"
- }
+ "ui:listFlavour": "list"
},
"query_time_histogram": {
- "type": "array",
- "items": {
- "type": "number"
- }
- },
- "max_db_tables": {
- "type": "integer"
- },
- "max_db_indexes": {
- "type": "integer"
+ "ui:listFlavour": "list"
}
- },
- "required": [
- "name",
- "dsn"
- ]
+ }
}
diff --git a/modules/postgres/do_query.go b/modules/postgres/do_query.go
index ea134ec5f..3b90be0d7 100644
--- a/modules/postgres/do_query.go
+++ b/modules/postgres/do_query.go
@@ -8,14 +8,14 @@ import (
)
func (p *Postgres) doQueryRow(query string, v any) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
return p.db.QueryRowContext(ctx, query).Scan(v)
}
func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
return db.QueryRowContext(ctx, query).Scan(v)
@@ -26,7 +26,7 @@ func (p *Postgres) doQuery(query string, assign func(column, value string, rowEn
}
func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
rows, err := db.QueryContext(ctx, query)
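All four hunks in this file apply the same shape; condensed once, with the receiver and scan helpers abstracted away:

```go
package dbquery

import (
	"context"
	"database/sql"
	"time"
)

// queryWithTimeout is the pattern doQueryRow/doDBQueryRow reduce to: one
// bounded context per statement, always cancelled, so a wedged server
// cannot stall the collection loop past the configured timeout.
func queryWithTimeout(db *sql.DB, timeout time.Duration, query string, dst any) error {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return db.QueryRowContext(ctx, query).Scan(dst)
}
```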
diff --git a/modules/postgres/postgres.go b/modules/postgres/postgres.go
index a1dabf9d3..8aab0cdbd 100644
--- a/modules/postgres/postgres.go
+++ b/modules/postgres/postgres.go
@@ -5,6 +5,7 @@ package postgres
import (
"database/sql"
_ "embed"
+ "errors"
"sync"
"time"
@@ -30,7 +31,7 @@ func init() {
func New() *Postgres {
return &Postgres{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres",
XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10},
@@ -56,41 +57,38 @@ func New() *Postgres {
}
type Config struct {
- DSN string `yaml:"dsn"`
- Timeout web.Duration `yaml:"timeout"`
- DBSelector string `yaml:"collect_databases_matching"`
- XactTimeHistogram []float64 `yaml:"transaction_time_histogram"`
- QueryTimeHistogram []float64 `yaml:"query_time_histogram"`
- MaxDBTables int64 `yaml:"max_db_tables"`
- MaxDBIndexes int64 `yaml:"max_db_indexes"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ DBSelector string `yaml:"collect_databases_matching" json:"collect_databases_matching"`
+ XactTimeHistogram []float64 `yaml:"transaction_time_histogram" json:"transaction_time_histogram"`
+ QueryTimeHistogram []float64 `yaml:"query_time_histogram" json:"query_time_histogram"`
+ MaxDBTables int64 `yaml:"max_db_tables" json:"max_db_tables"`
+ MaxDBIndexes int64 `yaml:"max_db_indexes" json:"max_db_indexes"`
}
type (
Postgres struct {
module.Base
- Config `yaml:",inline"`
-
- charts *module.Charts
-
- db *sql.DB
- dbConns map[string]*dbConn
-
- superUser *bool
- pgIsInRecovery *bool
- pgVersion int
+ Config `yaml:",inline" json:""`
+ charts *module.Charts
addXactQueryRunningTimeChartsOnce *sync.Once
addWALFilesChartsOnce *sync.Once
- dbSr matcher.Matcher
-
- mx *pgMetrics
+ db *sql.DB
+ dbConns map[string]*dbConn
+ superUser *bool
+ pgIsInRecovery *bool
+ pgVersion int
+ dbSr matcher.Matcher
recheckSettingsTime time.Time
recheckSettingsEvery time.Duration
+ doSlowTime time.Time
+ doSlowEvery time.Duration
- doSlowTime time.Time
- doSlowEvery time.Duration
+ mx *pgMetrics
}
dbConn struct {
db *sql.DB
@@ -99,28 +97,40 @@ type (
}
)
-func (p *Postgres) Init() bool {
+func (p *Postgres) Configuration() any {
+ return p.Config
+}
+
+func (p *Postgres) Init() error {
err := p.validateConfig()
if err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
sr, err := p.initDBSelector()
if err != nil {
p.Errorf("config validation: %v", err)
- return false
+ return err
}
p.dbSr = sr
p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram)
p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram)
- return true
+ return nil
}
-func (p *Postgres) Check() bool {
- return len(p.Collect()) > 0
+func (p *Postgres) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *Postgres) Charts() *module.Charts {
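The new Check distinguishes a failed collection from an empty one, which the old boolean collapsed into a single `false`. Its body is stamped near-verbatim into every module this PR converts; the generic form:

```go
package module

import "errors"

// checkOnce is the generic form of the Check body; the modules inline it
// rather than sharing a helper like this one.
func checkOnce(collect func() (map[string]int64, error)) error {
	mx, err := collect()
	if err != nil {
		return err // query/transport failure: surface the underlying error
	}
	if len(mx) == 0 {
		return errors.New("no metrics collected") // reachable, but nothing usable
	}
	return nil
}
```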
diff --git a/modules/postgres/postgres_test.go b/modules/postgres/postgres_test.go
index a41c11235..a96c779e5 100644
--- a/modules/postgres/postgres_test.go
+++ b/modules/postgres/postgres_test.go
@@ -12,6 +12,7 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/DATA-DOG/go-sqlmock"
@@ -20,93 +21,84 @@ import (
)
var (
- dataV140004ServerVersionNum, _ = os.ReadFile("testdata/v14.4/server_version_num.txt")
-
- dataV140004IsSuperUserFalse, _ = os.ReadFile("testdata/v14.4/is_super_user-false.txt")
- dataV140004IsSuperUserTrue, _ = os.ReadFile("testdata/v14.4/is_super_user-true.txt")
- dataV140004PGIsInRecoveryTrue, _ = os.ReadFile("testdata/v14.4/pg_is_in_recovery-true.txt")
- dataV140004SettingsMaxConnections, _ = os.ReadFile("testdata/v14.4/settings_max_connections.txt")
- dataV140004SettingsMaxLocksHeld, _ = os.ReadFile("testdata/v14.4/settings_max_locks_held.txt")
-
- dataV140004ServerCurrentConnections, _ = os.ReadFile("testdata/v14.4/server_current_connections.txt")
- dataV140004ServerConnectionsState, _ = os.ReadFile("testdata/v14.4/server_connections_state.txt")
- dataV140004Checkpoints, _ = os.ReadFile("testdata/v14.4/checkpoints.txt")
- dataV140004ServerUptime, _ = os.ReadFile("testdata/v14.4/uptime.txt")
- dataV140004TXIDWraparound, _ = os.ReadFile("testdata/v14.4/txid_wraparound.txt")
- dataV140004WALWrites, _ = os.ReadFile("testdata/v14.4/wal_writes.txt")
- dataV140004WALFiles, _ = os.ReadFile("testdata/v14.4/wal_files.txt")
- dataV140004WALArchiveFiles, _ = os.ReadFile("testdata/v14.4/wal_archive_files.txt")
- dataV140004CatalogRelations, _ = os.ReadFile("testdata/v14.4/catalog_relations.txt")
- dataV140004AutovacuumWorkers, _ = os.ReadFile("testdata/v14.4/autovacuum_workers.txt")
- dataV140004XactQueryRunningTime, _ = os.ReadFile("testdata/v14.4/xact_query_running_time.txt")
-
- dataV140004ReplStandbyAppDelta, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_delta.txt")
- dataV140004ReplStandbyAppLag, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_lag.txt")
-
- dataV140004ReplSlotFiles, _ = os.ReadFile("testdata/v14.4/replication_slot_files.txt")
-
- dataV140004DatabaseStats, _ = os.ReadFile("testdata/v14.4/database_stats.txt")
- dataV140004DatabaseSize, _ = os.ReadFile("testdata/v14.4/database_size.txt")
- dataV140004DatabaseConflicts, _ = os.ReadFile("testdata/v14.4/database_conflicts.txt")
- dataV140004DatabaseLocks, _ = os.ReadFile("testdata/v14.4/database_locks.txt")
-
- dataV140004QueryableDatabaseList, _ = os.ReadFile("testdata/v14.4/queryable_database_list.txt")
-
- dataV140004StatUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_tables_db_postgres.txt")
- dataV140004StatIOUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/statio_user_tables_db_postgres.txt")
-
- dataV140004StatUserIndexesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_indexes_db_postgres.txt")
-
- dataV140004Bloat, _ = os.ReadFile("testdata/v14.4/bloat_tables.txt")
- dataV140004ColumnsStats, _ = os.ReadFile("testdata/v14.4/table_columns_stats.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer140004ServerVersionNum, _ = os.ReadFile("testdata/v14.4/server_version_num.txt")
+ dataVer140004IsSuperUserFalse, _ = os.ReadFile("testdata/v14.4/is_super_user-false.txt")
+ dataVer140004IsSuperUserTrue, _ = os.ReadFile("testdata/v14.4/is_super_user-true.txt")
+ dataVer140004PGIsInRecoveryTrue, _ = os.ReadFile("testdata/v14.4/pg_is_in_recovery-true.txt")
+ dataVer140004SettingsMaxConnections, _ = os.ReadFile("testdata/v14.4/settings_max_connections.txt")
+ dataVer140004SettingsMaxLocksHeld, _ = os.ReadFile("testdata/v14.4/settings_max_locks_held.txt")
+ dataVer140004ServerCurrentConnections, _ = os.ReadFile("testdata/v14.4/server_current_connections.txt")
+ dataVer140004ServerConnectionsState, _ = os.ReadFile("testdata/v14.4/server_connections_state.txt")
+ dataVer140004Checkpoints, _ = os.ReadFile("testdata/v14.4/checkpoints.txt")
+ dataVer140004ServerUptime, _ = os.ReadFile("testdata/v14.4/uptime.txt")
+ dataVer140004TXIDWraparound, _ = os.ReadFile("testdata/v14.4/txid_wraparound.txt")
+ dataVer140004WALWrites, _ = os.ReadFile("testdata/v14.4/wal_writes.txt")
+ dataVer140004WALFiles, _ = os.ReadFile("testdata/v14.4/wal_files.txt")
+ dataVer140004WALArchiveFiles, _ = os.ReadFile("testdata/v14.4/wal_archive_files.txt")
+ dataVer140004CatalogRelations, _ = os.ReadFile("testdata/v14.4/catalog_relations.txt")
+ dataVer140004AutovacuumWorkers, _ = os.ReadFile("testdata/v14.4/autovacuum_workers.txt")
+ dataVer140004XactQueryRunningTime, _ = os.ReadFile("testdata/v14.4/xact_query_running_time.txt")
+ dataVer140004ReplStandbyAppDelta, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_delta.txt")
+ dataVer140004ReplStandbyAppLag, _ = os.ReadFile("testdata/v14.4/replication_standby_app_wal_lag.txt")
+ dataVer140004ReplSlotFiles, _ = os.ReadFile("testdata/v14.4/replication_slot_files.txt")
+ dataVer140004DatabaseStats, _ = os.ReadFile("testdata/v14.4/database_stats.txt")
+ dataVer140004DatabaseSize, _ = os.ReadFile("testdata/v14.4/database_size.txt")
+ dataVer140004DatabaseConflicts, _ = os.ReadFile("testdata/v14.4/database_conflicts.txt")
+ dataVer140004DatabaseLocks, _ = os.ReadFile("testdata/v14.4/database_locks.txt")
+ dataVer140004QueryableDatabaseList, _ = os.ReadFile("testdata/v14.4/queryable_database_list.txt")
+ dataVer140004StatUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_tables_db_postgres.txt")
+ dataVer140004StatIOUserTablesDBPostgres, _ = os.ReadFile("testdata/v14.4/statio_user_tables_db_postgres.txt")
+ dataVer140004StatUserIndexesDBPostgres, _ = os.ReadFile("testdata/v14.4/stat_user_indexes_db_postgres.txt")
+ dataVer140004Bloat, _ = os.ReadFile("testdata/v14.4/bloat_tables.txt")
+ dataVer140004ColumnsStats, _ = os.ReadFile("testdata/v14.4/table_columns_stats.txt")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataV140004ServerVersionNum": dataV140004ServerVersionNum,
-
- "dataV140004IsSuperUserFalse": dataV140004IsSuperUserFalse,
- "dataV140004IsSuperUserTrue": dataV140004IsSuperUserTrue,
- "dataV140004PGIsInRecoveryTrue": dataV140004PGIsInRecoveryTrue,
- "dataV140004SettingsMaxConnections": dataV140004SettingsMaxConnections,
- "dataV140004SettingsMaxLocksHeld": dataV140004SettingsMaxLocksHeld,
-
- "dataV140004ServerCurrentConnections": dataV140004ServerCurrentConnections,
- "dataV140004ServerConnectionsState": dataV140004ServerConnectionsState,
- "dataV140004Checkpoints": dataV140004Checkpoints,
- "dataV140004ServerUptime": dataV140004ServerUptime,
- "dataV140004TXIDWraparound": dataV140004TXIDWraparound,
- "dataV140004WALWrites": dataV140004WALWrites,
- "dataV140004WALFiles": dataV140004WALFiles,
- "dataV140004WALArchiveFiles": dataV140004WALArchiveFiles,
- "dataV140004CatalogRelations": dataV140004CatalogRelations,
- "dataV140004AutovacuumWorkers": dataV140004AutovacuumWorkers,
- "dataV140004XactQueryRunningTime": dataV140004XactQueryRunningTime,
-
- "dataV14004ReplStandbyAppDelta": dataV140004ReplStandbyAppDelta,
- "dataV14004ReplStandbyAppLag": dataV140004ReplStandbyAppLag,
-
- "dataV140004ReplSlotFiles": dataV140004ReplSlotFiles,
-
- "dataV140004DatabaseStats": dataV140004DatabaseStats,
- "dataV140004DatabaseSize": dataV140004DatabaseSize,
- "dataV140004DatabaseConflicts": dataV140004DatabaseConflicts,
- "dataV140004DatabaseLocks": dataV140004DatabaseLocks,
-
- "dataV140004QueryableDatabaseList": dataV140004QueryableDatabaseList,
-
- "dataV140004StatUserTablesDBPostgres": dataV140004StatUserTablesDBPostgres,
- "dataV140004StatIOUserTablesDBPostgres": dataV140004StatIOUserTablesDBPostgres,
-
- "dataV140004StatUserIndexesDBPostgres": dataV140004StatUserIndexesDBPostgres,
-
- "dataV140004Bloat": dataV140004Bloat,
- "dataV140004ColumnsStats": dataV140004ColumnsStats,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer140004ServerVersionNum": dataVer140004ServerVersionNum,
+ "dataVer140004IsSuperUserFalse": dataVer140004IsSuperUserFalse,
+ "dataVer140004IsSuperUserTrue": dataVer140004IsSuperUserTrue,
+ "dataVer140004PGIsInRecoveryTrue": dataVer140004PGIsInRecoveryTrue,
+ "dataVer140004SettingsMaxConnections": dataVer140004SettingsMaxConnections,
+ "dataVer140004SettingsMaxLocksHeld": dataVer140004SettingsMaxLocksHeld,
+ "dataVer140004ServerCurrentConnections": dataVer140004ServerCurrentConnections,
+ "dataVer140004ServerConnectionsState": dataVer140004ServerConnectionsState,
+ "dataVer140004Checkpoints": dataVer140004Checkpoints,
+ "dataVer140004ServerUptime": dataVer140004ServerUptime,
+ "dataVer140004TXIDWraparound": dataVer140004TXIDWraparound,
+ "dataVer140004WALWrites": dataVer140004WALWrites,
+ "dataVer140004WALFiles": dataVer140004WALFiles,
+ "dataVer140004WALArchiveFiles": dataVer140004WALArchiveFiles,
+ "dataVer140004CatalogRelations": dataVer140004CatalogRelations,
+ "dataVer140004AutovacuumWorkers": dataVer140004AutovacuumWorkers,
+ "dataVer140004XactQueryRunningTime": dataVer140004XactQueryRunningTime,
+ "dataV14004ReplStandbyAppDelta": dataVer140004ReplStandbyAppDelta,
+ "dataV14004ReplStandbyAppLag": dataVer140004ReplStandbyAppLag,
+ "dataVer140004ReplSlotFiles": dataVer140004ReplSlotFiles,
+ "dataVer140004DatabaseStats": dataVer140004DatabaseStats,
+ "dataVer140004DatabaseSize": dataVer140004DatabaseSize,
+ "dataVer140004DatabaseConflicts": dataVer140004DatabaseConflicts,
+ "dataVer140004DatabaseLocks": dataVer140004DatabaseLocks,
+ "dataVer140004QueryableDatabaseList": dataVer140004QueryableDatabaseList,
+ "dataVer140004StatUserTablesDBPostgres": dataVer140004StatUserTablesDBPostgres,
+ "dataVer140004StatIOUserTablesDBPostgres": dataVer140004StatIOUserTablesDBPostgres,
+ "dataVer140004StatUserIndexesDBPostgres": dataVer140004StatUserIndexesDBPostgres,
+ "dataVer140004Bloat": dataVer140004Bloat,
+ "dataVer140004ColumnsStats": dataVer140004ColumnsStats,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestPostgres_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Postgres{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestPostgres_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -128,9 +120,9 @@ func TestPostgres_Init(t *testing.T) {
pg.Config = test.config
if test.wantFail {
- assert.False(t, pg.Init())
+ assert.Error(t, pg.Init())
} else {
- assert.True(t, pg.Init())
+ assert.NoError(t, pg.Init())
}
})
}
@@ -154,54 +146,54 @@ func TestPostgres_Check(t *testing.T) {
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
pg.dbSr = matcher.TRUE()
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
-
- mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections)
- mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld)
-
- mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections)
- mockExpect(t, m, queryServerConnectionsState(), dataV140004ServerConnectionsState)
- mockExpect(t, m, queryCheckpoints(), dataV140004Checkpoints)
- mockExpect(t, m, queryServerUptime(), dataV140004ServerUptime)
- mockExpect(t, m, queryTXIDWraparound(), dataV140004TXIDWraparound)
- mockExpect(t, m, queryWALWrites(140004), dataV140004WALWrites)
- mockExpect(t, m, queryCatalogRelations(), dataV140004CatalogRelations)
- mockExpect(t, m, queryAutovacuumWorkers(), dataV140004AutovacuumWorkers)
- mockExpect(t, m, queryXactQueryRunningTime(), dataV140004XactQueryRunningTime)
-
- mockExpect(t, m, queryWALFiles(140004), dataV140004WALFiles)
- mockExpect(t, m, queryWALArchiveFiles(140004), dataV140004WALArchiveFiles)
-
- mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataV140004ReplStandbyAppDelta)
- mockExpect(t, m, queryReplicationStandbyAppLag(), dataV140004ReplStandbyAppLag)
- mockExpect(t, m, queryReplicationSlotFiles(140004), dataV140004ReplSlotFiles)
-
- mockExpect(t, m, queryDatabaseStats(), dataV140004DatabaseStats)
- mockExpect(t, m, queryDatabaseSize(140004), dataV140004DatabaseSize)
- mockExpect(t, m, queryDatabaseConflicts(), dataV140004DatabaseConflicts)
- mockExpect(t, m, queryDatabaseLocks(), dataV140004DatabaseLocks)
-
- mockExpect(t, m, queryQueryableDatabaseList(), dataV140004QueryableDatabaseList)
- mockExpect(t, m, queryStatUserTables(), dataV140004StatUserTablesDBPostgres)
- mockExpect(t, m, queryStatIOUserTables(), dataV140004StatIOUserTablesDBPostgres)
- mockExpect(t, m, queryStatUserIndexes(), dataV140004StatUserIndexesDBPostgres)
- mockExpect(t, m, queryBloat(), dataV140004Bloat)
- mockExpect(t, m, queryColumnsStats(), dataV140004ColumnsStats)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
},
},
"Fail when the second query unsuccessful (v14.4)": {
wantFail: true,
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
- mockExpect(t, m, querySettingsMaxConnections(), dataV140004ServerVersionNum)
- mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld)
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
- mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
mockExpectErr(m, queryServerConnectionsState())
},
},
@@ -214,9 +206,9 @@ func TestPostgres_Check(t *testing.T) {
"Fail when querying settings max connection returns an error": {
wantFail: true,
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
mockExpectErr(m, querySettingsMaxConnections())
},
@@ -233,14 +225,14 @@ func TestPostgres_Check(t *testing.T) {
pg.db = db
defer func() { _ = db.Close() }()
- require.True(t, pg.Init())
+ require.NoError(t, pg.Init())
test.prepareMock(t, pg, mock)
if test.wantFail {
- assert.False(t, pg.Check())
+ assert.Error(t, pg.Check())
} else {
- assert.True(t, pg.Check())
+ assert.NoError(t, pg.Check())
}
assert.NoError(t, mock.ExpectationsWereMet())
})
@@ -257,41 +249,41 @@ func TestPostgres_Collect(t *testing.T) {
{
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
pg.dbSr = matcher.TRUE()
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
-
- mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections)
- mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld)
-
- mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataV140004ServerCurrentConnections)
- mockExpect(t, m, queryServerConnectionsState(), dataV140004ServerConnectionsState)
- mockExpect(t, m, queryCheckpoints(), dataV140004Checkpoints)
- mockExpect(t, m, queryServerUptime(), dataV140004ServerUptime)
- mockExpect(t, m, queryTXIDWraparound(), dataV140004TXIDWraparound)
- mockExpect(t, m, queryWALWrites(140004), dataV140004WALWrites)
- mockExpect(t, m, queryCatalogRelations(), dataV140004CatalogRelations)
- mockExpect(t, m, queryAutovacuumWorkers(), dataV140004AutovacuumWorkers)
- mockExpect(t, m, queryXactQueryRunningTime(), dataV140004XactQueryRunningTime)
-
- mockExpect(t, m, queryWALFiles(140004), dataV140004WALFiles)
- mockExpect(t, m, queryWALArchiveFiles(140004), dataV140004WALArchiveFiles)
-
- mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataV140004ReplStandbyAppDelta)
- mockExpect(t, m, queryReplicationStandbyAppLag(), dataV140004ReplStandbyAppLag)
- mockExpect(t, m, queryReplicationSlotFiles(140004), dataV140004ReplSlotFiles)
-
- mockExpect(t, m, queryDatabaseStats(), dataV140004DatabaseStats)
- mockExpect(t, m, queryDatabaseSize(140004), dataV140004DatabaseSize)
- mockExpect(t, m, queryDatabaseConflicts(), dataV140004DatabaseConflicts)
- mockExpect(t, m, queryDatabaseLocks(), dataV140004DatabaseLocks)
-
- mockExpect(t, m, queryQueryableDatabaseList(), dataV140004QueryableDatabaseList)
- mockExpect(t, m, queryStatUserTables(), dataV140004StatUserTablesDBPostgres)
- mockExpect(t, m, queryStatIOUserTables(), dataV140004StatIOUserTablesDBPostgres)
- mockExpect(t, m, queryStatUserIndexes(), dataV140004StatUserIndexesDBPostgres)
- mockExpect(t, m, queryBloat(), dataV140004Bloat)
- mockExpect(t, m, queryColumnsStats(), dataV140004ColumnsStats)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
+
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
+
+ mockExpect(t, m, queryServerCurrentConnectionsUsed(), dataVer140004ServerCurrentConnections)
+ mockExpect(t, m, queryServerConnectionsState(), dataVer140004ServerConnectionsState)
+ mockExpect(t, m, queryCheckpoints(), dataVer140004Checkpoints)
+ mockExpect(t, m, queryServerUptime(), dataVer140004ServerUptime)
+ mockExpect(t, m, queryTXIDWraparound(), dataVer140004TXIDWraparound)
+ mockExpect(t, m, queryWALWrites(140004), dataVer140004WALWrites)
+ mockExpect(t, m, queryCatalogRelations(), dataVer140004CatalogRelations)
+ mockExpect(t, m, queryAutovacuumWorkers(), dataVer140004AutovacuumWorkers)
+ mockExpect(t, m, queryXactQueryRunningTime(), dataVer140004XactQueryRunningTime)
+
+ mockExpect(t, m, queryWALFiles(140004), dataVer140004WALFiles)
+ mockExpect(t, m, queryWALArchiveFiles(140004), dataVer140004WALArchiveFiles)
+
+ mockExpect(t, m, queryReplicationStandbyAppDelta(140004), dataVer140004ReplStandbyAppDelta)
+ mockExpect(t, m, queryReplicationStandbyAppLag(), dataVer140004ReplStandbyAppLag)
+ mockExpect(t, m, queryReplicationSlotFiles(140004), dataVer140004ReplSlotFiles)
+
+ mockExpect(t, m, queryDatabaseStats(), dataVer140004DatabaseStats)
+ mockExpect(t, m, queryDatabaseSize(140004), dataVer140004DatabaseSize)
+ mockExpect(t, m, queryDatabaseConflicts(), dataVer140004DatabaseConflicts)
+ mockExpect(t, m, queryDatabaseLocks(), dataVer140004DatabaseLocks)
+
+ mockExpect(t, m, queryQueryableDatabaseList(), dataVer140004QueryableDatabaseList)
+ mockExpect(t, m, queryStatUserTables(), dataVer140004StatUserTablesDBPostgres)
+ mockExpect(t, m, queryStatIOUserTables(), dataVer140004StatIOUserTablesDBPostgres)
+ mockExpect(t, m, queryStatUserIndexes(), dataVer140004StatUserIndexesDBPostgres)
+ mockExpect(t, m, queryBloat(), dataVer140004Bloat)
+ mockExpect(t, m, queryColumnsStats(), dataVer140004ColumnsStats)
},
check: func(t *testing.T, pg *Postgres) {
mx := pg.Collect()
@@ -625,9 +617,9 @@ func TestPostgres_Collect(t *testing.T) {
"Fail when querying settings max connections returns an error": {
{
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
mockExpectErr(m, querySettingsMaxConnections())
},
@@ -641,12 +633,12 @@ func TestPostgres_Collect(t *testing.T) {
"Fail when querying the server connections returns an error": {
{
prepareMock: func(t *testing.T, pg *Postgres, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryServerVersion(), dataV140004ServerVersionNum)
- mockExpect(t, m, queryIsSuperUser(), dataV140004IsSuperUserTrue)
- mockExpect(t, m, queryPGIsInRecovery(), dataV140004PGIsInRecoveryTrue)
+ mockExpect(t, m, queryServerVersion(), dataVer140004ServerVersionNum)
+ mockExpect(t, m, queryIsSuperUser(), dataVer140004IsSuperUserTrue)
+ mockExpect(t, m, queryPGIsInRecovery(), dataVer140004PGIsInRecoveryTrue)
- mockExpect(t, m, querySettingsMaxConnections(), dataV140004SettingsMaxConnections)
- mockExpect(t, m, querySettingsMaxLocksHeld(), dataV140004SettingsMaxLocksHeld)
+ mockExpect(t, m, querySettingsMaxConnections(), dataVer140004SettingsMaxConnections)
+ mockExpect(t, m, querySettingsMaxLocksHeld(), dataVer140004SettingsMaxLocksHeld)
mockExpectErr(m, queryServerCurrentConnectionsUsed())
},
@@ -669,7 +661,7 @@ func TestPostgres_Collect(t *testing.T) {
pg.db = db
defer func() { _ = db.Close() }()
- require.True(t, pg.Init())
+ require.NoError(t, pg.Init())
for i, step := range test {
t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
diff --git a/modules/postgres/testdata/config.json b/modules/postgres/testdata/config.json
new file mode 100644
index 000000000..6b39278c5
--- /dev/null
+++ b/modules/postgres/testdata/config.json
@@ -0,0 +1,14 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "timeout": 123.123,
+ "collect_databases_matching": "ok",
+ "transaction_time_histogram": [
+ 123.123
+ ],
+ "query_time_histogram": [
+ 123.123
+ ],
+ "max_db_tables": 123,
+ "max_db_indexes": 123
+}
diff --git a/modules/postgres/testdata/config.yaml b/modules/postgres/testdata/config.yaml
new file mode 100644
index 000000000..36ff5f0b1
--- /dev/null
+++ b/modules/postgres/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+dsn: "ok"
+timeout: 123.123
+collect_databases_matching: "ok"
+transaction_time_histogram:
+ - 123.123
+query_time_histogram:
+ - 123.123
+max_db_tables: 123
+max_db_indexes: 123
diff --git a/modules/powerdns/authoritativens.go b/modules/powerdns/authoritativens.go
index 07b7fdbcf..73ec42f1e 100644
--- a/modules/powerdns/authoritativens.go
+++ b/modules/powerdns/authoritativens.go
@@ -4,6 +4,7 @@ package powerdns
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -29,7 +30,7 @@ func New() *AuthoritativeNS {
URL: "http://127.0.0.1:8081",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -37,43 +38,57 @@ func New() *AuthoritativeNS {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type AuthoritativeNS struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
httpClient *http.Client
- charts *module.Charts
}
-func (ns *AuthoritativeNS) Init() bool {
+func (ns *AuthoritativeNS) Configuration() any {
+ return ns.Config
+}
+
+func (ns *AuthoritativeNS) Init() error {
err := ns.validateConfig()
if err != nil {
ns.Errorf("config validation: %v", err)
- return false
+ return err
}
client, err := ns.initHTTPClient()
if err != nil {
ns.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
ns.httpClient = client
cs, err := ns.initCharts()
if err != nil {
ns.Errorf("init charts: %v", err)
- return false
+ return err
}
ns.charts = cs
- return true
+ return nil
}
-func (ns *AuthoritativeNS) Check() bool {
- return len(ns.Collect()) > 0
+func (ns *AuthoritativeNS) Check() error {
+ mx, err := ns.collect()
+ if err != nil {
+ ns.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (ns *AuthoritativeNS) Charts() *module.Charts {
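Config now tags the embedded web.HTTP with `json:""` alongside `yaml:",inline"`. With encoding/json, an anonymous field whose tag carries an empty name stays flattened into the parent object, so the fixture's top-level url/timeout/proxy keys land on the embedded struct, mirroring yaml's inline behavior. In miniature:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type HTTP struct {
	URL string `json:"url"`
}

type Config struct {
	HTTP        `yaml:",inline" json:""` // empty tag name: fields stay promoted
	UpdateEvery int `yaml:"update_every" json:"update_every"`
}

func main() {
	b, _ := json.Marshal(Config{
		HTTP:        HTTP{URL: "http://127.0.0.1:8081"},
		UpdateEvery: 1,
	})
	fmt.Println(string(b)) // {"url":"http://127.0.0.1:8081","update_every":1}
}
```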
diff --git a/modules/powerdns/authoritativens_test.go b/modules/powerdns/authoritativens_test.go
index 71e5c6dc4..8c7822cb2 100644
--- a/modules/powerdns/authoritativens_test.go
+++ b/modules/powerdns/authoritativens_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
@@ -16,24 +17,29 @@ import (
)
var (
- v430statistics, _ = os.ReadFile("testdata/v4.3.0/statistics.json")
- recursorStatistics, _ = os.ReadFile("testdata/recursor/statistics.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer430statistics, _ = os.ReadFile("testdata/v4.3.0/statistics.json")
+ dataRecursorStatistics, _ = os.ReadFile("testdata/recursor/statistics.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v430statistics": v430statistics,
- "recursorStatistics": recursorStatistics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer430statistics": dataVer430statistics,
+ "dataRecursorStatistics": dataRecursorStatistics,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*AuthoritativeNS)(nil), New())
+func TestAuthoritativeNS_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &AuthoritativeNS{}, dataConfigJSON, dataConfigYAML)
}
-func TestRecursor_Init(t *testing.T) {
+func TestAuthoritativeNS_Init(t *testing.T) {
tests := map[string]struct {
config Config
wantFail bool
@@ -70,17 +76,17 @@ func TestRecursor_Init(t *testing.T) {
ns.Config = test.config
if test.wantFail {
- assert.False(t, ns.Init())
+ assert.Error(t, ns.Init())
} else {
- assert.True(t, ns.Init())
+ assert.NoError(t, ns.Init())
}
})
}
}
-func TestRecursor_Check(t *testing.T) {
+func TestAuthoritativeNS_Check(t *testing.T) {
tests := map[string]struct {
- prepare func() (p *AuthoritativeNS, cleanup func())
+ prepare func() (ns *AuthoritativeNS, cleanup func())
wantFail bool
}{
"success on valid response v4.3.0": {
@@ -106,30 +112,30 @@ func TestRecursor_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
- recursor, cleanup := test.prepare()
+ ns, cleanup := test.prepare()
defer cleanup()
- require.True(t, recursor.Init())
+ require.NoError(t, ns.Init())
if test.wantFail {
- assert.False(t, recursor.Check())
+ assert.Error(t, ns.Check())
} else {
- assert.True(t, recursor.Check())
+ assert.NoError(t, ns.Check())
}
})
}
}
-func TestRecursor_Charts(t *testing.T) {
- recursor := New()
- require.True(t, recursor.Init())
- assert.NotNil(t, recursor.Charts())
+func TestAuthoritativeNS_Charts(t *testing.T) {
+ ns := New()
+ require.NoError(t, ns.Init())
+ assert.NotNil(t, ns.Charts())
}
-func TestRecursor_Cleanup(t *testing.T) {
+func TestAuthoritativeNS_Cleanup(t *testing.T) {
assert.NotPanics(t, New().Cleanup)
}
-func TestRecursor_Collect(t *testing.T) {
+func TestAuthoritativeNS_Collect(t *testing.T) {
tests := map[string]struct {
prepare func() (p *AuthoritativeNS, cleanup func())
wantCollected map[string]int64
@@ -236,7 +242,7 @@ func TestRecursor_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
ns, cleanup := test.prepare()
defer cleanup()
- require.True(t, ns.Init())
+ require.NoError(t, ns.Init())
collected := ns.Collect()
@@ -314,7 +320,7 @@ func preparePowerDNSAuthoritativeNSEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathLocalStatistics:
- _, _ = w.Write(v430statistics)
+ _, _ = w.Write(dataVer430statistics)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -326,7 +332,7 @@ func preparePowerDNSRecursorEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathLocalStatistics:
- _, _ = w.Write(recursorStatistics)
+ _, _ = w.Write(dataRecursorStatistics)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/powerdns/config_schema.json b/modules/powerdns/config_schema.json
index 93f8e72a2..4c440ae07 100644
--- a/modules/powerdns/config_schema.json
+++ b/modules/powerdns/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/powerdns job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PowerDNS collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the PowerDNS built-in webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1:8081"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/powerdns/init.go b/modules/powerdns/init.go
index a577db773..aefdc5cb9 100644
--- a/modules/powerdns/init.go
+++ b/modules/powerdns/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (ns AuthoritativeNS) validateConfig() error {
+func (ns *AuthoritativeNS) validateConfig() error {
if ns.URL == "" {
return errors.New("URL not set")
}
@@ -20,10 +20,10 @@ func (ns AuthoritativeNS) validateConfig() error {
return nil
}
-func (ns AuthoritativeNS) initHTTPClient() (*http.Client, error) {
+func (ns *AuthoritativeNS) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(ns.Client)
}
-func (ns AuthoritativeNS) initCharts() (*module.Charts, error) {
+func (ns *AuthoritativeNS) initCharts() (*module.Charts, error) {
return charts.Copy(), nil
}
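The receiver change here (and in powerdns_recursor/init.go below) is mostly housekeeping: these helpers don't mutate the receiver, so the win is a uniform pointer method set and no per-call copy of the struct with its embedded module.Base. The semantic difference, in miniature:

```go
package main

import "fmt"

type counter struct{ n int }

func (c counter) incByValue() { c.n++ } // mutates a copy; caller sees nothing
func (c *counter) incByPtr()  { c.n++ } // mutates the caller's value

func main() {
	var c counter
	c.incByValue()
	c.incByPtr()
	fmt.Println(c.n) // 1: only the pointer-receiver increment survives
}
```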
diff --git a/modules/powerdns/testdata/config.json b/modules/powerdns/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/powerdns/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/powerdns/testdata/config.yaml b/modules/powerdns/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/powerdns/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
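The YAML fixture writes `not_follow_redirects: yes` where its JSON twin writes `true` — presumably deliberate, since gopkg.in/yaml.v2 follows YAML 1.1, where bare yes/no parse as booleans, so both fixtures must decode identically (a YAML 1.2 parser would read "yes" as a string). A quick check:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		NotFollowRedirects bool `yaml:"not_follow_redirects"`
	}
	// yaml.v2 (YAML 1.1) parses the bare scalar `yes` as boolean true.
	if err := yaml.Unmarshal([]byte("not_follow_redirects: yes"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.NotFollowRedirects) // true
}
```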
diff --git a/modules/powerdns_recursor/config_schema.json b/modules/powerdns_recursor/config_schema.json
index fcd19e150..3893720b0 100644
--- a/modules/powerdns_recursor/config_schema.json
+++ b/modules/powerdns_recursor/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/powerdns_recursor job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "PowerDNS Recursor collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the PowerDNS built-in webserver.",
+ "type": "string",
+ "default": "http://127.0.0.1:8081"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/powerdns_recursor/init.go b/modules/powerdns_recursor/init.go
index aa74eec2f..3d9e226bc 100644
--- a/modules/powerdns_recursor/init.go
+++ b/modules/powerdns_recursor/init.go
@@ -10,7 +10,7 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (r Recursor) validateConfig() error {
+func (r *Recursor) validateConfig() error {
if r.URL == "" {
return errors.New("URL not set")
}
@@ -20,10 +20,10 @@ func (r Recursor) validateConfig() error {
return nil
}
-func (r Recursor) initHTTPClient() (*http.Client, error) {
+func (r *Recursor) initHTTPClient() (*http.Client, error) {
return web.NewHTTPClient(r.Client)
}
-func (r Recursor) initCharts() (*module.Charts, error) {
+func (r *Recursor) initCharts() (*module.Charts, error) {
return charts.Copy(), nil
}
diff --git a/modules/powerdns_recursor/recursor.go b/modules/powerdns_recursor/recursor.go
index cd052ba6d..68e5f79ae 100644
--- a/modules/powerdns_recursor/recursor.go
+++ b/modules/powerdns_recursor/recursor.go
@@ -4,6 +4,7 @@ package powerdns_recursor
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -29,7 +30,7 @@ func New() *Recursor {
URL: "http://127.0.0.1:8081",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -37,43 +38,57 @@ func New() *Recursor {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type Recursor struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
httpClient *http.Client
- charts *module.Charts
}
-func (r *Recursor) Init() bool {
+func (r *Recursor) Configuration() any {
+ return r.Config
+}
+
+func (r *Recursor) Init() error {
err := r.validateConfig()
if err != nil {
r.Errorf("config validation: %v", err)
- return false
+ return err
}
client, err := r.initHTTPClient()
if err != nil {
r.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
r.httpClient = client
cs, err := r.initCharts()
if err != nil {
r.Errorf("init charts: %v", err)
- return false
+ return err
}
r.charts = cs
- return true
+ return nil
}
-func (r *Recursor) Check() bool {
- return len(r.Collect()) > 0
+func (r *Recursor) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (r *Recursor) Charts() *module.Charts {
diff --git a/modules/powerdns_recursor/recursor_test.go b/modules/powerdns_recursor/recursor_test.go
index 4ef3c2d08..f4f8fb234 100644
--- a/modules/powerdns_recursor/recursor_test.go
+++ b/modules/powerdns_recursor/recursor_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
@@ -16,21 +17,26 @@ import (
)
var (
- v431statistics, _ = os.ReadFile("testdata/v4.3.1/statistics.json")
- authoritativeStatistics, _ = os.ReadFile("testdata/authoritative/statistics.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer431statistics, _ = os.ReadFile("testdata/v4.3.1/statistics.json")
+ dataAuthoritativeStatistics, _ = os.ReadFile("testdata/authoritative/statistics.json")
)
-func Test_testDataIsCorrectlyReadAndValid(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v431statistics": v431statistics,
- "authoritativeStatistics": authoritativeStatistics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer431statistics": dataVer431statistics,
+ "dataAuthoritativeStatistics": dataAuthoritativeStatistics,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Recursor)(nil), New())
+func TestRecursor_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Recursor{}, dataConfigJSON, dataConfigYAML)
}
func TestRecursor_Init(t *testing.T) {
@@ -70,9 +76,9 @@ func TestRecursor_Init(t *testing.T) {
recursor.Config = test.config
if test.wantFail {
- assert.False(t, recursor.Init())
+ assert.Error(t, recursor.Init())
} else {
- assert.True(t, recursor.Init())
+ assert.NoError(t, recursor.Init())
}
})
}
@@ -108,12 +114,12 @@ func TestRecursor_Check(t *testing.T) {
t.Run(name, func(t *testing.T) {
recursor, cleanup := test.prepare()
defer cleanup()
- require.True(t, recursor.Init())
+ require.NoError(t, recursor.Init())
if test.wantFail {
- assert.False(t, recursor.Check())
+ assert.Error(t, recursor.Check())
} else {
- assert.True(t, recursor.Check())
+ assert.NoError(t, recursor.Check())
}
})
}
@@ -121,7 +127,7 @@ func TestRecursor_Check(t *testing.T) {
func TestRecursor_Charts(t *testing.T) {
recursor := New()
- require.True(t, recursor.Init())
+ require.NoError(t, recursor.Init())
assert.NotNil(t, recursor.Charts())
}
@@ -271,7 +277,7 @@ func TestRecursor_Collect(t *testing.T) {
t.Run(name, func(t *testing.T) {
recursor, cleanup := test.prepare()
defer cleanup()
- require.True(t, recursor.Init())
+ require.NoError(t, recursor.Init())
collected := recursor.Collect()
@@ -349,7 +355,7 @@ func preparePowerDNSRecursorEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathLocalStatistics:
- _, _ = w.Write(v431statistics)
+ _, _ = w.Write(dataVer431statistics)
default:
w.WriteHeader(http.StatusNotFound)
}
@@ -361,7 +367,7 @@ func preparePowerDNSAuthoritativeEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathLocalStatistics:
- _, _ = w.Write(authoritativeStatistics)
+ _, _ = w.Write(dataAuthoritativeStatistics)
default:
w.WriteHeader(http.StatusNotFound)
}
diff --git a/modules/powerdns_recursor/testdata/config.json b/modules/powerdns_recursor/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/powerdns_recursor/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/powerdns_recursor/testdata/config.yaml b/modules/powerdns_recursor/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/powerdns_recursor/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
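
The new testdata/config.{json,yaml} pairs exercise the dual struct tags added above (`yaml:"..." json:"..."`): the same Config must decode identically from both formats. A self-contained sketch of the round-trip (illustrative only; the real Config embeds web.HTTP):

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"gopkg.in/yaml.v3"
    )

    type Config struct {
    	UpdateEvery int    `yaml:"update_every" json:"update_every"`
    	URL         string `yaml:"url" json:"url"`
    }

    func main() {
    	var fromJSON, fromYAML Config
    	_ = json.Unmarshal([]byte(`{"update_every":123,"url":"ok"}`), &fromJSON)
    	_ = yaml.Unmarshal([]byte("update_every: 123\nurl: ok\n"), &fromYAML)
    	fmt.Println(fromJSON == fromYAML) // true: both decoders fill the same fields
    }
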
diff --git a/modules/prometheus/collect.go b/modules/prometheus/collect.go
index 4494b8859..0ec93c80b 100644
--- a/modules/prometheus/collect.go
+++ b/modules/prometheus/collect.go
@@ -29,6 +29,7 @@ func (p *Prometheus) collect() (map[string]int64, error) {
return nil, nil
}
+ // TODO: shouldn't modify the value from Config
if p.ExpectedPrefix != "" {
if !hasPrefix(mfs, p.ExpectedPrefix) {
return nil, fmt.Errorf("'%s' metrics have no expected prefix (%s)", p.URL, p.ExpectedPrefix)
@@ -36,6 +37,7 @@ func (p *Prometheus) collect() (map[string]int64, error) {
p.ExpectedPrefix = ""
}
+ // TODO: shouldn't modify the value from Config
if p.MaxTS > 0 {
if n := calcMetrics(mfs); n > p.MaxTS {
return nil, fmt.Errorf("'%s' num of time series (%d) > limit (%d)", p.URL, n, p.MaxTS)
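
The two TODOs flag a real smell: collect() clears p.ExpectedPrefix on the user-supplied Config after the first successful check. A conventional fix (a sketch, not this PR's code) is to snapshot mutable settings into unexported fields during Init and mutate only the snapshot:

    package main

    import "fmt"

    type Config struct{ ExpectedPrefix string }

    type Prometheus struct {
    	Config
    	expectedPrefix string // working copy, safe to clear
    }

    func (p *Prometheus) init() { p.expectedPrefix = p.ExpectedPrefix } // snapshot once

    func (p *Prometheus) collectOnce() {
    	if p.expectedPrefix != "" {
    		// ...verify the prefix on the first scrape...
    		p.expectedPrefix = "" // clear the copy, not the user's Config
    	}
    }

    func main() {
    	p := &Prometheus{Config: Config{ExpectedPrefix: "node_"}}
    	p.init()
    	p.collectOnce()
    	fmt.Println(p.ExpectedPrefix, p.expectedPrefix == "") // node_ true
    }
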
diff --git a/modules/prometheus/config_schema.json b/modules/prometheus/config_schema.json
index 60261d542..a8ea19a37 100644
--- a/modules/prometheus/config_schema.json
+++ b/modules/prometheus/config_schema.json
@@ -1,113 +1,256 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/prometheus job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "selector": {
- "type": "object",
- "properties": {
- "allow": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "deny": {
- "type": "array",
- "items": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Prometheus collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Prometheus endpoint.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 10
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "expected_prefix": {
+ "title": "Expected prefix",
+ "description": "If an endpoint does not return at least one metric with the specified prefix, the data is not processed.",
+ "type": "string"
+ },
+ "app": {
+ "title": "Application",
+ "description": "If set, this value will be used in the chart context as 'prometheus.{app}.{metric_name}'.",
+ "type": "string"
+ },
+ "selector": {
+ "title": "Selectors",
+ "description": "Configuration for selecting and filtering a set of time series using Prometheus selector expressions. The logic is as follows: (allow1 OR allow2) AND !(deny1 or deny2).",
+ "type": "object",
+ "properties": {
+ "allow": {
+ "title": "Allow",
+ "description": "Allow time series that match any of the specified selectors.",
+ "type": "array",
+ "items": {
+ "title": "Selector",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "deny": {
+ "title": "Deny",
+ "description": "Deny time series that match any of the specified selectors.",
+ "type": "array",
+ "items": {
+ "title": "Selector",
+ "type": "string"
+ },
+ "uniqueItems": true
}
}
},
- "required": [
- "allow",
- "deny"
- ]
- },
- "fallback_type": {
- "type": "object",
- "properties": {
- "counter": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "gauge": {
- "type": "array",
- "items": {
- "type": "string"
+ "max_time_series": {
+ "title": "Time series limit",
+ "description": "If an endpoint returns more time series than this limit, the data is not processed. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 2000
+ },
+ "max_time_series_per_metric": {
+ "title": "Time series per metric limit",
+ "description": "Metrics with more time series than this limit are skipped. Set to 0 for no limit.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 2000
+ },
+ "fallback_type": {
+ "title": "Untyped metrics fallback",
+ "description": "Process Untyped metrics as Counter or Gauge instead of ignoring them. Patterns follow shell file name patterns.",
+ "type": "object",
+ "properties": {
+ "gauge": {
+ "title": "As Gauge",
+ "description": "Untyped metrics matching any pattern will be processed as Gauge.",
+ "type": "array",
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
+ },
+ "Counter": {
+ "title": "As Counter",
+ "description": "Untyped metrics matching any pattern will be processed as Counter.",
+ "type": "array",
+ "items": {
+ "title": "Pattern",
+ "type": "string"
+ },
+ "uniqueItems": true
}
}
},
- "required": [
- "counter",
- "gauge"
- ]
- },
- "bearer_token": {
- "type": "string"
- },
- "expected_prefix": {
- "type": "string"
- },
- "max_time_series": {
- "type": "integer"
- },
- "max_time_series_per_metric": {
- "type": "integer"
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "bearer_token_file": {
+ "title": "Bearer token file",
+ "description": "The path to the file with Bearer token.",
+ "type": "string"
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "tls_ca": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_cert": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "tls_key": {
- "type": "string"
+ "proxy_password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "expected_prefix",
+ "app"
+ ]
+ },
+ {
+ "title": "Selectors",
+ "fields": [
+ "selector"
+ ]
+ },
+ {
+ "title": "Limits",
+ "fields": [
+ "max_time_series",
+ "max_time_series_per_metric"
+ ]
+ },
+ {
+ "title": "Untyped fallback",
+ "fields": [
+ "fallback_type"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password",
+ "bearer_token_file"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
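
The selector description above pins down the evaluation order: a series is kept when it matches any allow expression and no deny expression. A standalone sketch of that logic (plain substrings stand in for real Prometheus selector expressions):

    package main

    import (
    	"fmt"
    	"strings"
    )

    // selected implements (allow1 OR allow2 ...) AND !(deny1 OR deny2 ...).
    func selected(series string, allow, deny []string) bool {
    	match := func(pats []string) bool {
    		for _, p := range pats {
    			if strings.Contains(series, p) {
    				return true
    			}
    		}
    		return false
    	}
    	if len(allow) > 0 && !match(allow) {
    		return false // no allow expression matched
    	}
    	return !match(deny) // any deny match drops the series
    }

    func main() {
    	fmt.Println(selected("node_cpu_seconds_total", []string{"node_"}, []string{"_total"})) // false
    	fmt.Println(selected("node_load1", []string{"node_"}, []string{"_total"}))             // true
    }
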
diff --git a/modules/prometheus/prometheus.go b/modules/prometheus/prometheus.go
index 32a91e5c2..21917fa1c 100644
--- a/modules/prometheus/prometheus.go
+++ b/modules/prometheus/prometheus.go
@@ -4,6 +4,7 @@ package prometheus
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -31,7 +32,7 @@ func New() *Prometheus {
Config: Config{
HTTP: web.HTTP{
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 10},
+ Timeout: web.Duration(time.Second * 10),
},
},
MaxTS: 2000,
@@ -43,69 +44,80 @@ func New() *Prometheus {
}
type Config struct {
- web.HTTP `yaml:",inline"`
- Name string `yaml:"name"`
- Application string `yaml:"app"`
- BearerTokenFile string `yaml:"bearer_token_file"`
-
- Selector selector.Expr `yaml:"selector"`
-
- ExpectedPrefix string `yaml:"expected_prefix"`
- MaxTS int `yaml:"max_time_series"`
- MaxTSPerMetric int `yaml:"max_time_series_per_metric"`
- FallbackType struct {
- Counter []string `yaml:"counter"`
- Gauge []string `yaml:"gauge"`
- } `yaml:"fallback_type"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Name string `yaml:"name" json:"name"`
+ Application string `yaml:"app" json:"app"`
+ BearerTokenFile string `yaml:"bearer_token_file" json:"bearer_token_file"`
+ Selector selector.Expr `yaml:"selector" json:"selector"`
+ ExpectedPrefix string `yaml:"expected_prefix" json:"expected_prefix"`
+ MaxTS int `yaml:"max_time_series" json:"max_time_series"`
+ MaxTSPerMetric int `yaml:"max_time_series_per_metric" json:"max_time_series_per_metric"`
+ FallbackType struct {
+ Gauge []string `yaml:"gauge" json:"gauge"`
+ Counter []string `yaml:"counter" json:"counter"`
+ } `yaml:"fallback_type" json:"fallback_type"`
}
type Prometheus struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- prom prometheus.Prometheus
- cache *cache
+ prom prometheus.Prometheus
+ cache *cache
fallbackType struct {
counter matcher.Matcher
gauge matcher.Matcher
}
}
-func (p *Prometheus) Init() bool {
+func (p *Prometheus) Configuration() any {
+ return p.Config
+}
+
+func (p *Prometheus) Init() error {
if err := p.validateConfig(); err != nil {
p.Errorf("validating config: %v", err)
- return false
+ return err
}
prom, err := p.initPrometheusClient()
if err != nil {
p.Errorf("init prometheus client: %v", err)
- return false
+ return err
}
p.prom = prom
m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter)
if err != nil {
p.Errorf("init counter fallback type matcher: %v", err)
- return false
+ return err
}
p.fallbackType.counter = m
m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge)
if err != nil {
p.Errorf("init counter fallback type matcher: %v", err)
- return false
+ return err
}
p.fallbackType.gauge = m
- return true
+ return nil
}
-func (p *Prometheus) Check() bool {
- return len(p.Collect()) > 0
+func (p *Prometheus) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *Prometheus) Charts() *module.Charts {
@@ -124,4 +136,8 @@ func (p *Prometheus) Collect() map[string]int64 {
return mx
}
-func (p *Prometheus) Cleanup() {}
+func (p *Prometheus) Cleanup() {
+ if p.prom != nil && p.prom.HTTPClient() != nil {
+ p.prom.HTTPClient().CloseIdleConnections()
+ }
+}
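
The non-empty Cleanup above relies on http.Client.CloseIdleConnections (Go 1.12+), which drops pooled keep-alive connections so a removed job stops pinning sockets. Minimal standalone usage:

    package main

    import "net/http"

    func main() {
    	client := &http.Client{}
    	// The URL is a placeholder endpoint, not part of this PR.
    	if resp, err := client.Get("http://127.0.0.1:9090/metrics"); err == nil {
    		resp.Body.Close()
    	}
    	client.CloseIdleConnections() // release kept-alive sockets held by the transport
    }
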
diff --git a/modules/prometheus/prometheus_test.go b/modules/prometheus/prometheus_test.go
index 95bf55bd2..06b643f40 100644
--- a/modules/prometheus/prometheus_test.go
+++ b/modules/prometheus/prometheus_test.go
@@ -6,6 +6,7 @@ import (
"fmt"
"net/http"
"net/http/httptest"
+ "os"
"testing"
"github.com/netdata/go.d.plugin/agent/module"
@@ -16,6 +17,24 @@ import (
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestPrometheus_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Prometheus{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestPrometheus_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -44,9 +63,9 @@ func TestPrometheus_Init(t *testing.T) {
prom.Config = test.config
if test.wantFail {
- assert.False(t, prom.Init())
+ assert.Error(t, prom.Init())
} else {
- assert.True(t, prom.Init())
+ assert.NoError(t, prom.Init())
}
})
}
@@ -57,7 +76,7 @@ func TestPrometheus_Cleanup(t *testing.T) {
prom := New()
prom.URL = "http://127.0.0.1"
- require.True(t, prom.Init())
+ require.NoError(t, prom.Init())
assert.NotPanics(t, prom.Cleanup)
}
@@ -169,12 +188,12 @@ test_counter_no_meta_metric_1_total{label1="value2"} 11
prom, cleanup := test.prepare()
defer cleanup()
- require.True(t, prom.Init())
+ require.NoError(t, prom.Init())
if test.wantFail {
- assert.False(t, prom.Check())
+ assert.Error(t, prom.Check())
} else {
- assert.True(t, prom.Check())
+ assert.NoError(t, prom.Check())
}
})
}
@@ -558,7 +577,7 @@ test_gauge_no_meta_metric_1{label1="value2"} 12
defer srv.Close()
prom.URL = srv.URL
- require.True(t, prom.Init())
+ require.NoError(t, prom.Init())
for num, step := range test.steps {
t.Run(fmt.Sprintf("step num %d ('%s')", num+1, step.desc), func(t *testing.T) {
diff --git a/modules/prometheus/testdata/config.json b/modules/prometheus/testdata/config.json
new file mode 100644
index 000000000..2e9b2e138
--- /dev/null
+++ b/modules/prometheus/testdata/config.json
@@ -0,0 +1,42 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "name": "ok",
+ "app": "ok",
+ "bearer_token_file": "ok",
+ "selector": {
+ "allow": [
+ "ok"
+ ],
+ "deny": [
+ "ok"
+ ]
+ },
+ "expected_prefix": "ok",
+ "max_time_series": 123,
+ "max_time_series_per_metric": 123,
+ "fallback_type": {
+ "gauge": [
+ "ok"
+ ],
+ "counter": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/prometheus/testdata/config.yaml b/modules/prometheus/testdata/config.yaml
new file mode 100644
index 000000000..e4548fe4a
--- /dev/null
+++ b/modules/prometheus/testdata/config.yaml
@@ -0,0 +1,33 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+name: "ok"
+app: "ok"
+bearer_token_file: "ok"
+selector:
+ allow:
+ - "ok"
+ deny:
+ - "ok"
+expected_prefix: "ok"
+max_time_series: 123
+max_time_series_per_metric: 123
+fallback_type:
+ gauge:
+ - "ok"
+ counter:
+ - "ok"
\ No newline at end of file
diff --git a/modules/prometheus/wip_meta.yaml b/modules/prometheus/wip_meta.yaml
deleted file mode 100644
index 6583c7c61..000000000
--- a/modules/prometheus/wip_meta.yaml
+++ /dev/null
@@ -1,1453 +0,0 @@
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: NetApp Trident
- link: https://github.com/NetApp/trident
- icon_filename: netapp.svg
- categories:
- - data-collection.storage-mount-points-and-filesystems
- keywords:
- - network monitoring
- - network performance
- - traffic analysis
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor NetApp Trident container storage metrics for efficient storage provisioning and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [NetApp Trident exporter](https://github.com/NetApp/trident).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [NetApp Trident exporter](https://github.com/NetApp/trident) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Scylla-Cluster-Tests
- link: https://github.com/scylladb/scylla-cluster-tests/
- icon_filename: scylla.png
- categories:
- - data-collection.database-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor ScyllaDB cluster test metrics for efficient database testing and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Scylla-Cluster-Tests Exporter](https://github.com/scylladb/scylla-cluster-tests/).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Scylla-Cluster-Tests Exporter](https://github.com/scylladb/scylla-cluster-tests/) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: ScyllaDB
- link: https://github.com/scylladb/scylladb
- icon_filename: scylla.png
- categories:
- - data-collection.database-servers
- keywords:
- - database
- - dbms
- - data storage
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track ScyllaDB NoSQL database metrics for efficient database management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [ScyllaDB exporter](https://github.com/scylladb/scylladb).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [ScyllaDB exporter](https://github.com/scylladb/scylladb) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: StayRTR
- link: https://github.com/bgp/stayrtr
- icon_filename: stayrtr.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track RPKI-to-Router (RTR) protocol metrics for efficient routing security and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [StayRTR Exporter](https://github.com/bgp/stayrtr).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [StayRTR Exporter](https://github.com/bgp/stayrtr) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Wildfly
- link: https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye
- icon_filename: wildfly.png
- categories:
- - data-collection.application-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor WildFly (formerly JBoss AS) Java application server metrics for efficient Java application management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Wildfly Exporter](https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Wildfly Exporter](https://docs.wildfly.org/18/Admin_Guide.html#MicroProfile_Metrics_SmallRye) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Zeek
- link: https://github.com/zeek/zeek
- icon_filename: zeek.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Zeek (formerly Bro) network security monitoring metrics for efficient network security and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Zeek](https://github.com/zeek/zeek).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Zeek](https://github.com/zeek/zeek) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Authelia
- link: https://www.authelia.com/reference/guides/metrics/#prometheus
- icon_filename: authelia.png
- categories:
- - data-collection.authentication-and-authorization
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on Authelia authentication and authorization metrics for enhanced security and user management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Authelia](https://www.authelia.com/reference/guides/metrics/#prometheus).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Authelia](https://www.authelia.com/reference/guides/metrics/#prometheus) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Butler (Qlik Sense DevOps toolbox)
- link: https://github.com/ptarmiganlabs/butler
- icon_filename: butler.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Butler Qlik Sense DevOps metrics for efficient development and operations management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Butler (Qlik Sense DevOps toolbox) Exporter. WIP](https://github.com/ptarmiganlabs/butler).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Butler (Qlik Sense DevOps toolbox) Exporter. WIP](https://github.com/ptarmiganlabs/butler) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Butler CW (Qlik Sense cache warming tool)
- link: https://github.com/ptarmiganlabs/butler-cw
- icon_filename: butler-cw.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on Butler CW Qlik Sense cache warming metrics for optimized data access and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Butler CW (Qlik Sense cache warming tool) Exporter. WIP](https://github.com/ptarmiganlabs/butler-cw).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Butler CW (Qlik Sense cache warming tool) Exporter. WIP](https://github.com/ptarmiganlabs/butler-cw) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Butler SOS (Qlik Sense monitoring tool)
- link: https://github.com/ptarmiganlabs/butler-sos
- icon_filename: butler-sos.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Butler SOS Qlik Sense metrics for comprehensive performance analysis and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Butler SOS (Qlik Sense monitoring tool) Exporter](https://github.com/ptarmiganlabs/butler-sos).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Butler SOS (Qlik Sense monitoring tool) Exporter](https://github.com/ptarmiganlabs/butler-sos) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: c-lightning
- link: https://github.com/lightningd/plugins/tree/master/prometheus
- icon_filename: lightning.png
- categories:
- - data-collection.blockchain-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track c-lightning metrics for optimized Lightning Network management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [c-lightning prometheus exporter](https://github.com/lightningd/plugins/tree/master/prometheus).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [c-lightning prometheus exporter](https://github.com/lightningd/plugins/tree/master/prometheus) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Cloudprober
- link: https://github.com/cloudprober/cloudprober
- icon_filename: cloudprober.png
- categories:
- - data-collection.synthetic-checks
- keywords:
- - cloud services
- - cloud computing
- - scalability
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on cloud service availability and latency with Cloudprober monitoring.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Cloudprober Exporter](https://github.com/cloudprober/cloudprober).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Cloudprober Exporter](https://github.com/cloudprober/cloudprober) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: consrv
- link: https://github.com/mdlayher/consrv
- icon_filename: consrv.png
- categories:
- - data-collection.generic-data-collection
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track serial console bridge server metrics for optimized service discovery and health management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [consrv](https://github.com/mdlayher/consrv).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [consrv](https://github.com/mdlayher/consrv) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: CoreRAD
- link: https://github.com/mdlayher/corerad
- icon_filename: corerad.png
- categories:
- - data-collection.dns-and-dhcp-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor CoreRAD IPv6 router advertisement daemon metrics for efficient network management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [CoreRAD](https://github.com/mdlayher/corerad).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [CoreRAD](https://github.com/mdlayher/corerad) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: CrateDB remote remote read/write adapter
- link: https://github.com/crate/cratedb-prometheus-adapter
- icon_filename: cratedb.png
- categories:
- - data-collection.database-servers
- keywords:
- - database
- - dbms
- - data storage
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track CrateDB metrics for efficient data storage and query performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [CrateDB remote remote read/write adapter](https://github.com/crate/cratedb-prometheus-adapter).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [CrateDB remote remote read/write adapter](https://github.com/crate/cratedb-prometheus-adapter) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: DRBD via drbd-reactor
- link: https://github.com/LINBIT/drbd-reactor
- icon_filename: drbd.png
- categories:
- - data-collection.storage-mount-points-and-filesystems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track DRBD metrics for efficient distributed replicated block device management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [DRBD exporter via drbd-reactor](https://github.com/LINBIT/drbd-reactor).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [DRBD exporter via drbd-reactor](https://github.com/LINBIT/drbd-reactor) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Falco
- link: https://github.com/falcosecurity/falco
- icon_filename: falco.png
- categories:
- - data-collection.security-systems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Falco security metrics for efficient runtime security management and threat detection.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Falco](https://github.com/falcosecurity/falco).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Falco](https://github.com/falcosecurity/falco) by following the instructions mentioned in the exporter README.
-
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: FastNetMon Advanced
- link: https://fastnetmon.com/docs-fnm-advanced/
- icon_filename: fastnetmon.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords:
- - network monitoring
- - network performance
- - traffic analysis
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on FastNetMon Advanced network monitoring metrics for efficient traffic analysis and DDoS detection.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [FastNetMon Advanced exporter](https://fastnetmon.com/docs-fnm-advanced/).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [FastNetMon Advanced exporter](https://fastnetmon.com/docs-fnm-advanced/) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Faucet SDN Faucet
- link: https://github.com/faucetsdn/faucet
- icon_filename: faucet.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Faucet software-defined networking metrics for efficient network management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Faucet SDN Faucet Exporter](https://github.com/faucetsdn/faucet).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Faucet SDN Faucet Exporter](https://github.com/faucetsdn/faucet) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Fawkes
- link: https://github.com/fawkesrobotics/fawkes
- icon_filename: fawkes.png
- categories:
- - data-collection.generic-data-collection
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Fawkes Robotic Real-Time Applications metrics for enhanced monitoring.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Fawkes](https://github.com/fawkesrobotics/fawkes).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Fawkes](https://github.com/fawkesrobotics/fawkes) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: gitlab-pages
- link: https://gitlab.com/gitlab-org/gitlab-pages/
- icon_filename: gitlab.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track GitLab Pages metrics for optimized static site hosting and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [gitlab-pages exporter](https://gitlab.com/gitlab-org/gitlab-pages/).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [gitlab-pages exporter](https://gitlab.com/gitlab-org/gitlab-pages/) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: gitlab-workhorse
- link: https://gitlab.com/gitlab-org/gitlab-workhorse
- icon_filename: gitlab.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor GitLab Workhorse metrics for efficient web server and reverse proxy management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [gitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [gitlab-workhorse](https://gitlab.com/gitlab-org/gitlab-workhorse) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: GlusterFS
- link: https://github.com/gluster/gluster-prometheus
- icon_filename: gluster.png
- categories:
- - data-collection.storage-mount-points-and-filesystems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on GlusterFS distributed file system metrics for optimized storage management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [GlusterFS Exporter](https://github.com/gluster/gluster-prometheus).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [GlusterFS Exporter](https://github.com/gluster/gluster-prometheus) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Google Cloud Status Dashboard
- link: https://github.com/DazWilkin/gcp-status
- icon_filename: gcp.png
- categories:
- - data-collection.cloud-provider-managed
- keywords:
- - cloud services
- - cloud computing
- - scalability
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on Google Cloud status metrics for efficient service availability management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Google Cloud Status Dashboard exporter](https://github.com/DazWilkin/gcp-status).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Google Cloud Status Dashboard exporter](https://github.com/DazWilkin/gcp-status) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: haraka
- link: https://github.com/mailprotector/haraka-plugin-prometheus
- icon_filename: haraka.png
- categories:
- - data-collection.mail-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Haraka SMTP server metrics for efficient email delivery and security management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [haraka exporter](https://github.com/mailprotector/haraka-plugin-prometheus).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [haraka exporter](https://github.com/mailprotector/haraka-plugin-prometheus) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Hetzner Cloud CSI Driver (Nodes)
- link: https://github.com/hetznercloud/csi-driver
- icon_filename: hetznercloud.png
- categories:
- - data-collection.cloud-provider-managed
- keywords:
- - cloud services
- - cloud computing
- - scalability
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Hetzner Cloud Container Storage Interface driver metrics for efficient Kubernetes storage management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Hetzner Cloud CSI Driver (Nodes)](https://github.com/hetznercloud/csi-driver).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Hetzner Cloud CSI Driver (Nodes)](https://github.com/hetznercloud/csi-driver) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Lutron Homeworks
- link: https://github.com/jbarwick/homeworks-service
- icon_filename: lutron-homeworks.png
- categories:
- - data-collection.iot-devices
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on Homeworks home automation system metrics for optimized smart home management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Lutron Homeworks Exporter](https://github.com/jbarwick/homeworks-service).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Lutron Homeworks Exporter](https://github.com/jbarwick/homeworks-service) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: JIRAlert
- link: https://github.com/alin-sinpalean/jiralert
- icon_filename: jira.png
- categories:
- - data-collection.notifications
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on JIRA issue tracking metrics for optimized project management and collaboration.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [JIRAlert](https://github.com/alin-sinpalean/jiralert).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [JIRAlert](https://github.com/alin-sinpalean/jiralert) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Kafka Configs
- link: https://github.com/EladLeev/kafka-config-metrics
- icon_filename: kafka.svg
- categories:
- - data-collection.message-brokers
- keywords:
- - big data
- - stream processing
- - message broker
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Kafka configuration metrics for optimized message queue performance and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Kafka Configs Metrics Exporter](https://github.com/EladLeev/kafka-config-metrics).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Kafka Configs Metrics Exporter](https://github.com/EladLeev/kafka-config-metrics) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Maddy Mail Server t
- link: https://github.com/foxcpp/maddy
- icon_filename: maddy.png
- categories:
- - data-collection.mail-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on Maddy Mail Server metrics for efficient email delivery and security management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Maddy Mail Server metrics endpoint](https://github.com/foxcpp/maddy).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Maddy Mail Server metrics endpoint](https://github.com/foxcpp/maddy) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Purpleidea Mgmt
- link: https://github.com/purpleidea/mgmt
- icon_filename: mgmtconfig.png
- categories:
- - data-collection.provisioning-systems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on MGMT configuration management system metrics for efficient infrastructure management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [mgmt exporter](https://github.com/purpleidea/mgmt).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [mgmt exporter](https://github.com/purpleidea/mgmt) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Nebula
- link: https://github.com/immstudios/promexp
- icon_filename: nebula.png
- categories:
- - data-collection.media-streaming-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on Nebula broadcast system metrics for efficient media broadcasting and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Nebula Exporter (unified exporter for broadcasters)](https://github.com/immstudios/promexp).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Nebula Exporter (unified exporter for broadcasters)](https://github.com/immstudios/promexp) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: NeonKube Service
- link: https://github.com/nforgeio/neonKUBE
- icon_filename: neonkube.png
- categories:
- - data-collection.service-discovery-registry
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Neon CRM metrics for efficient nonprofit management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Neon Service Standard Exporter](https://github.com/nforgeio/neonKUBE).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Neon Service Standard Exporter](https://github.com/nforgeio/neonKUBE) by following the instructions mentioned in the exporter README.
-
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Onionprobe
- link: https://gitlab.torproject.org/tpo/onion-services/onionprobe
- icon_filename: onion.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Tor network metrics for efficient anonymity network performance and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Onionprobe](https://gitlab.torproject.org/tpo/onion-services/onionprobe).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Onionprobe](https://gitlab.torproject.org/tpo/onion-services/onionprobe) by following the instructions mentioned in the exporter README.
-
-
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Opflex-agent
- link: https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md
- icon_filename: opflex.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep tabs on OpFlex agent metrics for efficient software-defined networking management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Opflex-agent Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Opflex-agent Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Opflex-server
- link: https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md
- icon_filename: opflex.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor OpFlex server metrics for efficient software-defined networking management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Opflex-server Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Opflex-server Exporter](https://github.com/noironetworks/opflex/blob/master/docs/prometheus.md) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: ovn-kubernetes Master
- link: https://github.com/ovn-org/ovn-kubernetes
- icon_filename: kube-ovn.png
- categories:
- - data-collection.kubernetes
- keywords:
- - network monitoring
- - network performance
- - traffic analysis
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track OVN-Kubernetes master metrics for efficient Kubernetes networking management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [ovn-kubernetes Master Exporter](https://github.com/ovn-org/ovn-kubernetes).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [ovn-kubernetes Master Exporter](https://github.com/ovn-org/ovn-kubernetes) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: ovn-kubernetes Node
- link: https://github.com/ovn-org/ovn-kubernetes
- icon_filename: kube-ovn.png
- categories:
- - data-collection.kubernetes
- keywords:
- - network monitoring
- - network performance
- - traffic analysis
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor OVN-Kubernetes node metrics for efficient Kubernetes networking management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [ovn-kubernetes Node Exporter](https://github.com/ovn-org/ovn-kubernetes).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [ovn-kubernetes Node Exporter](https://github.com/ovn-org/ovn-kubernetes) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Pathvector
- link: https://github.com/natesales/pathvector
- icon_filename: pathvector.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Pathvector BGP routing metrics for efficient
- Border Gateway Protocol management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Pathvector](https://github.com/natesales/pathvector).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Pathvector](https://github.com/natesales/pathvector) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: PCP
- link: https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3
- icon_filename: pcp.png
- categories:
- - data-collection.apm
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Performance Co-Pilot system performance metrics for efficient IT infrastructure management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [PCP exporter](https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [PCP exporter](https://github.com/performancecopilot/pcp/blob/main/man/man3/pmwebapi.3) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: poudriere
- link: https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere
- icon_filename: poudriere.png
- categories:
- - data-collection.freebsd-systems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Poudriere FreeBSD package building and testing metrics for efficient package management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [poudriere exporter](https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [poudriere exporter](https://rnd.phryk.net/phryk-evil-mad-sciences-llc/prometheus_poudriere) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: RCT Inverter
- link: https://github.com/svalouch/rctmon
- icon_filename: rct.png
- categories:
- - data-collection.iot-devices
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on RCT Power inverter metrics for efficient solar energy management and monitoring.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [RctMon - RCT Inverter metrics extractor](https://github.com/svalouch/rctmon).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [RctMon - RCT Inverter metrics extractor](https://github.com/svalouch/rctmon) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: RTRTR
- link: https://github.com/NLnetLabs/rtrtr
- icon_filename: rtrtr.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Keep an eye on RPKI-to-Router (RTR) protocol metrics for efficient routing security and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [rtrtr exporter](https://github.com/NLnetLabs/rtrtr).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [rtrtr exporter](https://github.com/NLnetLabs/rtrtr) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: silverpeak
- link: https://github.com/ipHeaders/silverpeak-prometheus
- icon_filename: silverpeak.png
- categories:
- - data-collection.networking-stack-and-network-interfaces
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor Silver Peak SD-WAN metrics for efficient wide area network management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [silverpeak-prometheus](https://github.com/ipHeaders/silverpeak-prometheus).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [silverpeak-prometheus](https://github.com/ipHeaders/silverpeak-prometheus) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: SmartPi
- link: https://github.com/nDenerserve/SmartPi
- icon_filename: smartpi.png
- categories:
- - data-collection.hardware-devices-and-sensors
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track SmartPi smart meter metrics for efficient energy management and monitoring.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [SmartPi](https://github.com/nDenerserve/SmartPi).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [SmartPi](https://github.com/nDenerserve/SmartPi) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: SRS (v5.0.67+)
- link: https://github.com/ossrs/srs
- icon_filename: srs.jpg
- categories:
- - data-collection.media-streaming-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Simple-RTMP-Server (SRS) metrics for efficient live streaming server management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [SRS (v5.0.67+)](https://github.com/ossrs/srs).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [SRS (v5.0.67+)](https://github.com/ossrs/srs) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: supercronic
- link: https://github.com/aptible/supercronic/
- icon_filename: supercronic.png
- categories:
- - data-collection.provisioning-systems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Supercronic job scheduler metrics for efficient task scheduling and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [supercronic](https://github.com/aptible/supercronic/).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [supercronic](https://github.com/aptible/supercronic/) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: SUSE Saptune
- link: https://github.com/SUSE/saptune
- icon_filename: suse.png
- categories:
- - data-collection.linux-systems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor SUSE Linux Enterprise Server (SLES) Saptune metrics for efficient system tuning and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [SUSE Saptune exporter](https://github.com/SUSE/saptune).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [SUSE Saptune exporter](https://github.com/SUSE/saptune) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: Tezos Node
- link: https://gitlab.com/tezos/tezos
- icon_filename: tezos.png
- categories:
- - data-collection.blockchain-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track Tezos blockchain node metrics for efficient blockchain network management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Tezos Node Exporter](https://gitlab.com/tezos/tezos).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Tezos Node Exporter](https://gitlab.com/tezos/tezos) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: ThirdAI
- link: https://github.com/ThirdAILabs/Demos
- icon_filename: thirdai.png
- categories:
- - data-collection.generic-data-collection
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor ThirdAI platform metrics for efficient management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [ThirdAI exporter](https://github.com/ThirdAILabs/Demos).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [ThirdAI exporter](https://github.com/ThirdAILabs/Demos) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: trickster
- link: https://github.com/trickstercache/trickster
- icon_filename: trickster.png
- categories:
- - data-collection.web-servers-and-web-proxies
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track metrics from Trickster, a caching reverse proxy for time-series databases, for efficient time-series data management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [trickster](https://github.com/trickstercache/trickster).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [trickster](https://github.com/trickstercache/trickster) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: ClickHouse
- link: https://github.com/ClickHouse/ClickHouse
- icon_filename: clickhouse.svg
- categories:
- - data-collection.database-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track ClickHouse analytics database metrics for efficient database performance and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [Yandex ClickHouse Exporter](https://github.com/ClickHouse/ClickHouse).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [Yandex ClickHouse Exporter](https://github.com/ClickHouse/ClickHouse) by following the instructions mentioned in the exporter README.
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: zedhook
- link: https://github.com/mdlayher/zedhook
- icon_filename: zedhook.png
- categories:
- - data-collection.logs-servers
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Monitor ZFS Event Daemon (ZED) metrics for efficient file system event monitoring and management.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [zedhook](https://github.com/mdlayher/zedhook).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [zedhook](https://github.com/mdlayher/zedhook) by following the instructions mentioned in the exporter README.
-
-- <<: *module
- meta:
- <<: *meta
- most_popular: false
- community: true
- monitored_instance:
- name: zrepl internal
- link: https://github.com/zrepl/zrepl
- icon_filename: zrepl.png
- categories:
- - data-collection.storage-mount-points-and-filesystems
- keywords: []
- overview:
- <<: *overview
- data_collection:
- metrics_description: |
- Track ZFS replication metrics using zrepl for efficient file system replication management and performance.
- method_description: |
- Metrics are gathered by periodically sending HTTP requests to [zrepl internal exporter](https://github.com/zrepl/zrepl).
- setup:
- <<: *setup
- prerequisites:
- list:
- - title: Install OpenMetrics Exporter
- description: |
- Install [zrepl internal exporter](https://github.com/zrepl/zrepl) by following the instructions mentioned in the exporter README.
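
The entries removed above are all variants of the generic Prometheus/OpenMetrics collector: each description reduces to periodically scraping a /metrics endpoint over HTTP. A minimal, self-contained Go sketch of that scrape loop (illustrative only, not the plugin's actual parser; the URL and interval are placeholders):

```go
package main

import (
	"bufio"
	"fmt"
	"net/http"
	"strconv"
	"strings"
	"time"
)

// scrapeOnce GETs a Prometheus/OpenMetrics endpoint and pulls out
// "name value" samples. Deliberately naive: it mishandles labels that
// contain spaces and ignores trailing timestamps.
func scrapeOnce(url string) (map[string]float64, error) {
	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	mx := make(map[string]float64)
	sc := bufio.NewScanner(resp.Body)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // skip blanks and HELP/TYPE metadata
		}
		fields := strings.Fields(line)
		if len(fields) < 2 {
			continue
		}
		if v, err := strconv.ParseFloat(fields[1], 64); err == nil {
			mx[fields[0]] = v
		}
	}
	return mx, sc.Err()
}

func main() {
	for range time.Tick(10 * time.Second) { // stands in for update_every
		mx, err := scrapeOnce("http://127.0.0.1:8080/metrics") // placeholder URL
		if err != nil {
			fmt.Println("scrape failed:", err)
			continue
		}
		fmt.Printf("collected %d samples\n", len(mx))
	}
}
```
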
diff --git a/modules/proxysql/collect.go b/modules/proxysql/collect.go
index cc35fc02d..dfc559a97 100644
--- a/modules/proxysql/collect.go
+++ b/modules/proxysql/collect.go
@@ -225,14 +225,14 @@ func (p *ProxySQL) openConnection() error {
}
func (p *ProxySQL) doQueryRow(query string, v any) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
return p.db.QueryRowContext(ctx, query).Scan(v)
}
func (p *ProxySQL) doQuery(query string, assign func(column, value string, rowEnd bool)) error {
- ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration())
defer cancel()
rows, err := p.db.QueryContext(ctx, query)
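
The change from p.Timeout.Duration (field) to p.Timeout.Duration() (method) suggests web.Duration moved from a struct wrapping a time.Duration to a defined type with an accessor, which the web.Duration(time.Second) constructor later in this patch also points to. A sketch under that assumption:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Duration mirrors the assumed new web.Duration: a named type over
// time.Duration rather than a wrapper struct.
type Duration time.Duration

// Duration converts back to a time.Duration for the standard library.
func (d Duration) Duration() time.Duration { return time.Duration(d) }

func main() {
	timeout := Duration(time.Second)

	ctx, cancel := context.WithTimeout(context.Background(), timeout.Duration())
	defer cancel()

	select {
	case <-time.After(2 * time.Second):
		fmt.Println("work finished")
	case <-ctx.Done():
		fmt.Println("timed out:", ctx.Err()) // fires first with a 1s timeout
	}
}
```
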
diff --git a/modules/proxysql/config_schema.json b/modules/proxysql/config_schema.json
index 5fab79bc7..42a90f1f3 100644
--- a/modules/proxysql/config_schema.json
+++ b/modules/proxysql/config_schema.json
@@ -1,26 +1,40 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/proxysql job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ProxySQL collector configuration",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the ProxySQL server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "dsn": {
+ "title": "DSN",
+ "description": "ProxySQL server Data Source Name (DSN) specifying the connection details.",
+ "type": "string",
+ "default": "stats:stats@tcp(127.0.0.1:6032)/"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for queries executed against the ProxySQL server.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ }
},
- "dsn": {
- "type": "string"
- },
- "my.cnf": {
- "type": "string"
+ "required": [
+ "dsn"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "dsn"
- ]
+ }
}
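
The schema rework replaces a bare draft-07 document with a two-key envelope: jsonSchema drives validation, uiSchema drives form rendering (full-page layout, help text, password widgets). A small sketch of reading that envelope; only the field names come from the file above, the consuming code is assumed:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// schemaEnvelope matches the new two-key layout of config_schema.json.
type schemaEnvelope struct {
	JSONSchema json.RawMessage `json:"jsonSchema"`
	UISchema   json.RawMessage `json:"uiSchema"`
}

func main() {
	raw, err := os.ReadFile("modules/proxysql/config_schema.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var env schemaEnvelope
	if err := json.Unmarshal(raw, &env); err != nil {
		fmt.Fprintln(os.Stderr, "not a schema envelope:", err)
		os.Exit(1)
	}
	fmt.Printf("jsonSchema: %d bytes, uiSchema: %d bytes\n",
		len(env.JSONSchema), len(env.UISchema))
}
```
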
diff --git a/modules/proxysql/metadata.yaml b/modules/proxysql/metadata.yaml
index a8ba0e638..2c9562d99 100644
--- a/modules/proxysql/metadata.yaml
+++ b/modules/proxysql/metadata.yaml
@@ -53,7 +53,7 @@ modules:
list:
- name: update_every
description: Data collection frequency.
- default_value: 5
+ default_value: 1
required: false
- name: autodetection_retry
description: Recheck interval in seconds. Zero means no recheck will be scheduled.
@@ -63,10 +63,6 @@ modules:
description: Data Source Name. See [DSN syntax](https://github.com/go-sql-driver/mysql#dsn-data-source-name).
default_value: stats:stats@tcp(127.0.0.1:6032)/
required: true
- - name: my.cnf
- description: Specifies my.cnf file to read connection parameters from under the [client] section.
- default_value: ""
- required: false
- name: timeout
description: Query timeout in seconds.
default_value: 1
diff --git a/modules/proxysql/proxysql.go b/modules/proxysql/proxysql.go
index d52c36efd..7491f6b00 100644
--- a/modules/proxysql/proxysql.go
+++ b/modules/proxysql/proxysql.go
@@ -5,6 +5,7 @@ package proxysql
import (
"database/sql"
_ "embed"
+ "errors"
_ "github.com/go-sql-driver/mysql"
"sync"
"time"
@@ -27,7 +28,7 @@ func New() *ProxySQL {
return &ProxySQL{
Config: Config{
DSN: "stats:stats@tcp(127.0.0.1:6032)/",
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second),
},
charts: baseCharts.Copy(),
@@ -41,37 +42,48 @@ func New() *ProxySQL {
}
type Config struct {
- DSN string `yaml:"dsn"`
- MyCNF string `yaml:"my.cnf"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DSN string `yaml:"dsn" json:"dsn"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
-type (
- ProxySQL struct {
- module.Base
- Config `yaml:",inline"`
+type ProxySQL struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- db *sql.DB
+ charts *module.Charts
- charts *module.Charts
+ db *sql.DB
- once *sync.Once
- cache *cache
- }
-)
+ once *sync.Once
+ cache *cache
+}
-func (p *ProxySQL) Init() bool {
+func (p *ProxySQL) Configuration() any {
+ return p.Config
+}
+
+func (p *ProxySQL) Init() error {
if p.DSN == "" {
- p.Error("'dsn' not set")
- return false
+ p.Error("dsn not set")
+ return errors.New("dsn not set")
}
p.Debugf("using DSN [%s]", p.DSN)
- return true
+
+ return nil
}
-func (p *ProxySQL) Check() bool {
- return len(p.Collect()) > 0
+func (p *ProxySQL) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (p *ProxySQL) Charts() *module.Charts {
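
The lifecycle change above is the heart of this patch: Init and Check return error instead of bool, and each module gains Configuration() any so the agent can serialize its config. A minimal sketch of the pattern, with the interface shape inferred from this diff rather than copied from the agent package:

```go
package main

import (
	"errors"
	"fmt"
)

// Module is the assumed post-change lifecycle contract.
type Module interface {
	Init() error
	Check() error
	Configuration() any
}

type Config struct {
	DSN string `yaml:"dsn" json:"dsn"`
}

type Example struct {
	Config
}

func (e *Example) Configuration() any { return e.Config }

func (e *Example) Init() error {
	if e.DSN == "" {
		return errors.New("dsn not set") // was: log + return false
	}
	return nil
}

func (e *Example) Check() error {
	mx := map[string]int64{} // pretend collection returned nothing
	if len(mx) == 0 {
		return errors.New("no metrics collected") // was: return len(...) > 0
	}
	return nil
}

func main() {
	var m Module = &Example{}
	if err := m.Init(); err != nil {
		fmt.Println("init failed:", err) // the error now carries the reason
	}
}
```
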
diff --git a/modules/proxysql/proxysql_test.go b/modules/proxysql/proxysql_test.go
index ec31c4d85..968625fcd 100644
--- a/modules/proxysql/proxysql_test.go
+++ b/modules/proxysql/proxysql_test.go
@@ -12,35 +12,46 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- dataV2010Version, _ = os.ReadFile("testdata/v2.0.10/version.txt")
- dataV2010StatsMySQLGlobal, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_global.txt")
- dataV2010StatsMemoryMetrics, _ = os.ReadFile("testdata/v2.0.10/stats_memory_metrics.txt")
- dataV2010StatsMySQLCommandsCounters, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_commands_counters.txt")
- dataV2010StatsMySQLUsers, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_users.txt")
- dataV2010StatsMySQLConnectionPool, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_connection_pool .txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer2010Version, _ = os.ReadFile("testdata/v2.0.10/version.txt")
+ dataVer2010StatsMySQLGlobal, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_global.txt")
+ dataVer2010StatsMemoryMetrics, _ = os.ReadFile("testdata/v2.0.10/stats_memory_metrics.txt")
+ dataVer2010StatsMySQLCommandsCounters, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_commands_counters.txt")
+ dataVer2010StatsMySQLUsers, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_users.txt")
+ dataVer2010StatsMySQLConnectionPool, _ = os.ReadFile("testdata/v2.0.10/stats_mysql_connection_pool .txt")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "dataV2010Version": dataV2010Version,
- "dataV2010StatsMySQLGlobal": dataV2010StatsMySQLGlobal,
- "dataV2010StatsMemoryMetrics": dataV2010StatsMemoryMetrics,
- "dataV2010StatsMySQLCommandsCounters": dataV2010StatsMySQLCommandsCounters,
- "dataV2010StatsMySQLUsers": dataV2010StatsMySQLUsers,
- "dataV2010StatsMySQLConnectionPool": dataV2010StatsMySQLConnectionPool,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer2010Version": dataVer2010Version,
+ "dataVer2010StatsMySQLGlobal": dataVer2010StatsMySQLGlobal,
+ "dataVer2010StatsMemoryMetrics": dataVer2010StatsMemoryMetrics,
+ "dataVer2010StatsMySQLCommandsCounters": dataVer2010StatsMySQLCommandsCounters,
+ "dataVer2010StatsMySQLUsers": dataVer2010StatsMySQLUsers,
+ "dataVer2010StatsMySQLConnectionPool": dataVer2010StatsMySQLConnectionPool,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
_, err := prepareMockRows(data)
- require.NoErrorf(t, err, name)
+ require.NoError(t, err, name)
}
}
+func TestProxySQL_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ProxySQL{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestProxySQL_Init(t *testing.T) {
tests := map[string]struct {
config Config
@@ -62,9 +73,9 @@ func TestProxySQL_Init(t *testing.T) {
proxySQL.Config = test.config
if test.wantFail {
- assert.False(t, proxySQL.Init())
+ assert.Error(t, proxySQL.Init())
} else {
- assert.True(t, proxySQL.Init())
+ assert.NoError(t, proxySQL.Init())
}
})
}
@@ -111,45 +122,45 @@ func TestProxySQL_Check(t *testing.T) {
"success on all queries": {
wantFail: false,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
- mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal)
- mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics)
- mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters)
- mockExpect(t, m, queryStatsMySQLUsers, dataV2010StatsMySQLUsers)
- mockExpect(t, m, queryStatsMySQLConnectionPool, dataV2010StatsMySQLConnectionPool)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
+ mockExpect(t, m, queryStatsMySQLUsers, dataVer2010StatsMySQLUsers)
+ mockExpect(t, m, queryStatsMySQLConnectionPool, dataVer2010StatsMySQLConnectionPool)
},
},
"fails when error on querying global stats": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
mockExpectErr(m, queryStatsMySQLGlobal)
},
},
"fails when error on querying memory metrics": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
- mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
mockExpectErr(m, queryStatsMySQLMemoryMetrics)
},
},
"fails when error on querying mysql command counters": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
- mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal)
- mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
mockExpectErr(m, queryStatsMySQLCommandsCounters)
},
},
"fails when error on querying mysql users": {
wantFail: true,
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
- mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal)
- mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics)
- mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
mockExpectErr(m, queryStatsMySQLUsers)
},
},
@@ -165,14 +176,14 @@ func TestProxySQL_Check(t *testing.T) {
proxySQL.db = db
defer func() { _ = db.Close() }()
- require.True(t, proxySQL.Init())
+ require.NoError(t, proxySQL.Init())
test.prepareMock(t, mock)
if test.wantFail {
- assert.False(t, proxySQL.Check())
+ assert.Error(t, proxySQL.Check())
} else {
- assert.True(t, proxySQL.Check())
+ assert.NoError(t, proxySQL.Check())
}
assert.NoError(t, mock.ExpectationsWereMet())
})
@@ -189,12 +200,12 @@ func TestProxySQL_Collect(t *testing.T) {
"success on all queries (v2.0.10)": {
{
prepareMock: func(t *testing.T, m sqlmock.Sqlmock) {
- mockExpect(t, m, queryVersion, dataV2010Version)
- mockExpect(t, m, queryStatsMySQLGlobal, dataV2010StatsMySQLGlobal)
- mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataV2010StatsMemoryMetrics)
- mockExpect(t, m, queryStatsMySQLCommandsCounters, dataV2010StatsMySQLCommandsCounters)
- mockExpect(t, m, queryStatsMySQLUsers, dataV2010StatsMySQLUsers)
- mockExpect(t, m, queryStatsMySQLConnectionPool, dataV2010StatsMySQLConnectionPool)
+ mockExpect(t, m, queryVersion, dataVer2010Version)
+ mockExpect(t, m, queryStatsMySQLGlobal, dataVer2010StatsMySQLGlobal)
+ mockExpect(t, m, queryStatsMySQLMemoryMetrics, dataVer2010StatsMemoryMetrics)
+ mockExpect(t, m, queryStatsMySQLCommandsCounters, dataVer2010StatsMySQLCommandsCounters)
+ mockExpect(t, m, queryStatsMySQLUsers, dataVer2010StatsMySQLUsers)
+ mockExpect(t, m, queryStatsMySQLConnectionPool, dataVer2010StatsMySQLConnectionPool)
},
check: func(t *testing.T, my *ProxySQL) {
mx := my.Collect()
@@ -1152,7 +1163,7 @@ func TestProxySQL_Collect(t *testing.T) {
my.db = db
defer func() { _ = db.Close() }()
- require.True(t, my.Init())
+ require.NoError(t, my.Init())
for i, step := range test {
t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) {
diff --git a/modules/proxysql/testdata/config.json b/modules/proxysql/testdata/config.json
new file mode 100644
index 000000000..92a65cb5c
--- /dev/null
+++ b/modules/proxysql/testdata/config.json
@@ -0,0 +1,6 @@
+{
+ "update_every": 123,
+ "dsn": "ok",
+ "my.cnf": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/proxysql/testdata/config.yaml b/modules/proxysql/testdata/config.yaml
new file mode 100644
index 000000000..9bb474b94
--- /dev/null
+++ b/modules/proxysql/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+dsn: "ok"
+my.cnf: "ok"
+timeout: 123.123
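
The new testdata/config.json and config.yaml fixtures pair with TestConfigurationSerialize. Its exact behavior lives in the agent's module package and is not shown here; a plausible reading is a round-trip check like the sketch below (JSON half only, with Timeout simplified to a float):

```go
package sketch

import (
	"encoding/json"
	"os"
	"testing"
)

// Config mirrors the reworked ProxySQL Config; Timeout is simplified
// to a plain float64 here instead of web.Duration.
type Config struct {
	UpdateEvery int     `json:"update_every"`
	DSN         string  `json:"dsn"`
	Timeout     float64 `json:"timeout"`
}

func TestConfigRoundTripJSON(t *testing.T) {
	raw, err := os.ReadFile("testdata/config.json")
	if err != nil {
		t.Fatal(err)
	}
	var cfg Config
	if err := json.Unmarshal(raw, &cfg); err != nil {
		t.Fatalf("fixture must unmarshal into Config: %v", err)
	}
	if _, err := json.Marshal(cfg); err != nil {
		t.Fatalf("Config must marshal back: %v", err)
	}
	if cfg.UpdateEvery != 123 || cfg.DSN == "" {
		t.Errorf("values should survive the round trip, got %+v", cfg)
	}
}
```
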
diff --git a/modules/pulsar/cache.go b/modules/pulsar/cache.go
new file mode 100644
index 000000000..7f113bf86
--- /dev/null
+++ b/modules/pulsar/cache.go
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+func newCache() *cache {
+ return &cache{
+ namespaces: make(map[namespace]bool),
+ topics: make(map[topic]bool),
+ }
+}
+
+type (
+ namespace struct{ name string }
+ topic struct{ namespace, name string }
+ cache struct {
+ namespaces map[namespace]bool
+ topics map[topic]bool
+ }
+)
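
The cache extracted into cache.go is a pair of "seen" sets. A sketch of how such sets typically drive chart lifecycle: diff the current scrape against the cache to find topics that appeared or disappeared (usage assumed, not taken from collect.go):

```go
package main

import "fmt"

type topic struct{ namespace, name string }

// diff compares the cached set against the current scrape.
func diff(cached, current map[topic]bool) (added, removed []topic) {
	for tp := range current {
		if !cached[tp] {
			added = append(added, tp) // new topic: add its charts
		}
	}
	for tp := range cached {
		if !current[tp] {
			removed = append(removed, tp) // gone: mark charts obsolete
		}
	}
	return added, removed
}

func main() {
	cached := map[topic]bool{{"public/default", "orders"}: true}
	current := map[topic]bool{
		{"public/default", "orders"}:   true,
		{"public/default", "payments"}: true,
	}
	added, removed := diff(cached, current)
	fmt.Println("added:", added, "removed:", removed) // added: [{public/default payments}]
}
```
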
diff --git a/modules/pulsar/config_schema.json b/modules/pulsar/config_schema.json
index 083eb0b98..ed10c5264 100644
--- a/modules/pulsar/config_schema.json
+++ b/modules/pulsar/config_schema.json
@@ -1,76 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/pulsar job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "topic_filter": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Pulsar collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Pulsar metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8080/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
}
- }
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/pulsar/init.go b/modules/pulsar/init.go
new file mode 100644
index 000000000..8cf893b4f
--- /dev/null
+++ b/modules/pulsar/init.go
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package pulsar
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/matcher"
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (p *Pulsar) validateConfig() error {
+ if p.URL == "" {
+ return errors.New("url not set")
+ }
+ return nil
+}
+
+func (p *Pulsar) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(p.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, p.Request), nil
+}
+
+func (p *Pulsar) initTopicFilerMatcher() (matcher.Matcher, error) {
+ if p.TopicFilter.Empty() {
+ return matcher.TRUE(), nil
+ }
+ return p.TopicFilter.Parse()
+}
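
initTopicFilerMatcher falls back to a match-everything matcher when the filter is empty. Below is a self-contained sketch of includes/excludes semantics in the same spirit; the real matcher.SimpleExpr supports much richer patterns, while this stand-in only understands "*" and exact matches:

```go
package main

import "fmt"

type simpleExpr struct {
	Includes []string
	Excludes []string
}

// globMatch is a toy matcher: "*" matches everything, anything else
// must match exactly. The real matcher package is far more capable.
func globMatch(pattern, s string) bool {
	return pattern == "*" || pattern == s
}

func (e simpleExpr) match(s string) bool {
	// An empty filter matches everything, mirroring matcher.TRUE().
	if len(e.Includes) == 0 && len(e.Excludes) == 0 {
		return true
	}
	for _, p := range e.Excludes {
		if globMatch(p, s) {
			return false
		}
	}
	if len(e.Includes) == 0 {
		return true
	}
	for _, p := range e.Includes {
		if globMatch(p, s) {
			return true
		}
	}
	return false
}

func main() {
	// New() defaults to Excludes: ["*"], so per-topic metrics are off
	// until the user opts in with includes.
	f := simpleExpr{Excludes: []string{"*"}}
	fmt.Println(f.match("persistent://public/default/orders")) // false
}
```
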
diff --git a/modules/pulsar/pulsar.go b/modules/pulsar/pulsar.go
index 8b0ce9101..6f0dca3e8 100644
--- a/modules/pulsar/pulsar.go
+++ b/modules/pulsar/pulsar.go
@@ -8,11 +8,10 @@ import (
"sync"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -29,22 +28,21 @@ func init() {
}
func New() *Pulsar {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:8080/metrics",
+ return &Pulsar{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8080/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 5),
+ },
},
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ TopicFilter: matcher.SimpleExpr{
+ Includes: nil,
+ Excludes: []string{"*"},
},
},
- TopicFiler: matcher.SimpleExpr{
- Includes: nil,
- Excludes: []string{"*"},
- },
- }
- return &Pulsar{
- Config: config,
once: &sync.Once{},
charts: summaryCharts.Copy(),
nsCharts: namespaceCharts.Copy(),
@@ -54,90 +52,65 @@ func New() *Pulsar {
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- TopicFiler matcher.SimpleExpr `yaml:"topic_filter"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ TopicFilter matcher.SimpleExpr `yaml:"topic_filter" json:"topic_filter"`
+}
- Pulsar struct {
- module.Base
- Config `yaml:",inline"`
-
- prom prometheus.Prometheus
- topicFilter matcher.Matcher
- cache *cache
- curCache *cache
- once *sync.Once
- charts *Charts
- nsCharts *Charts
- topicChartsMapping map[string]string
- }
+type Pulsar struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- namespace struct{ name string }
- topic struct{ namespace, name string }
- cache struct {
- namespaces map[namespace]bool
- topics map[topic]bool
- }
-)
+ charts *Charts
+ nsCharts *Charts
-func newCache() *cache {
- return &cache{
- namespaces: make(map[namespace]bool),
- topics: make(map[topic]bool),
- }
+ prom prometheus.Prometheus
+
+ topicFilter matcher.Matcher
+ cache *cache
+ curCache *cache
+ once *sync.Once
+ topicChartsMapping map[string]string
}
-func (p Pulsar) validateConfig() error {
- if p.URL == "" {
- return errors.New("URL is not set")
- }
- return nil
+func (p *Pulsar) Configuration() any {
+ return p.Config
}
-func (p *Pulsar) initClient() error {
- client, err := web.NewHTTPClient(p.Client)
- if err != nil {
+func (p *Pulsar) Init() error {
+ if err := p.validateConfig(); err != nil {
+ p.Errorf("config validation: %v", err)
return err
}
- p.prom = prometheus.New(client, p.Request)
- return nil
-}
-
-func (p *Pulsar) initTopicFiler() error {
- if p.TopicFiler.Empty() {
- p.topicFilter = matcher.TRUE()
- return nil
+ prom, err := p.initPrometheusClient()
+ if err != nil {
+ p.Error(err)
+ return err
}
+ p.prom = prom
- m, err := p.TopicFiler.Parse()
+ m, err := p.initTopicFilerMatcher()
if err != nil {
+ p.Error(err)
return err
}
p.topicFilter = m
+
return nil
}
-func (p *Pulsar) Init() bool {
- if err := p.validateConfig(); err != nil {
- p.Errorf("config validation: %v", err)
- return false
- }
- if err := p.initClient(); err != nil {
- p.Errorf("client initializing: %v", err)
- return false
+func (p *Pulsar) Check() error {
+ mx, err := p.collect()
+ if err != nil {
+ p.Error(err)
+ return err
}
- if err := p.initTopicFiler(); err != nil {
- p.Errorf("topic filer initialization: %v", err)
- return false
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
- return true
-}
-
-func (p *Pulsar) Check() bool {
- return len(p.Collect()) > 0
+ return nil
}
func (p *Pulsar) Charts() *Charts {
@@ -156,4 +129,8 @@ func (p *Pulsar) Collect() map[string]int64 {
return mx
}
-func (Pulsar) Cleanup() {}
+func (p *Pulsar) Cleanup() {
+ if p.prom != nil && p.prom.HTTPClient() != nil {
+ p.prom.HTTPClient().CloseIdleConnections()
+ }
+}
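
Cleanup moves from a no-op to closing the HTTP client's idle connections. The reason, sketched: keep-alive sockets pooled by an http.Client outlive a stopped job unless released explicitly.

```go
package main

import (
	"net/http"
	"time"
)

type job struct {
	client *http.Client
}

func (j *job) cleanup() {
	if j.client != nil {
		// Frees pooled keep-alive sockets when the job is stopped,
		// so a reloaded or removed job doesn't leak file descriptors.
		j.client.CloseIdleConnections()
	}
}

func main() {
	j := &job{client: &http.Client{Timeout: 5 * time.Second}}
	defer j.cleanup()
	_, _ = j.client.Get("http://127.0.0.1:8080/metrics") // placeholder endpoint
}
```
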
diff --git a/modules/pulsar/pulsar_test.go b/modules/pulsar/pulsar_test.go
index 3bf9468b6..bff9791c5 100644
--- a/modules/pulsar/pulsar_test.go
+++ b/modules/pulsar/pulsar_test.go
@@ -9,31 +9,40 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/matcher"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- metricsNonPulsar, _ = os.ReadFile("testdata/non-pulsar.txt")
- metricsStdV250Namespaces, _ = os.ReadFile("testdata/standalone-v2.5.0-namespaces.txt")
- metricsStdV250Topics, _ = os.ReadFile("testdata/standalone-v2.5.0-topics.txt")
- metricsStdV250Topics2, _ = os.ReadFile("testdata/standalone-v2.5.0-topics-2.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNonPulsarMetrics, _ = os.ReadFile("testdata/non-pulsar.txt")
+ dataVer250Namespaces, _ = os.ReadFile("testdata/standalone-v2.5.0-namespaces.txt")
+ dataVer250Topics, _ = os.ReadFile("testdata/standalone-v2.5.0-topics.txt")
+ dataVer250Topics2, _ = os.ReadFile("testdata/standalone-v2.5.0-topics-2.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, metricsNonPulsar)
- assert.NotNil(t, metricsStdV250Namespaces)
- assert.NotNil(t, metricsStdV250Topics)
- assert.NotNil(t, metricsStdV250Topics2)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNonPulsarMetrics": dataNonPulsarMetrics,
+ "dataVer250Namespaces": dataVer250Namespaces,
+ "dataVer250Topics": dataVer250Topics,
+ "dataVer250Topics2": dataVer250Topics2,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestPulsar_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Pulsar{}, dataConfigJSON, dataConfigYAML)
}
func TestPulsar_Init(t *testing.T) {
@@ -49,8 +58,8 @@ func TestPulsar_Init(t *testing.T) {
},
"bad syntax topic filer": {
config: Config{
- HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metrics"}},
- TopicFiler: matcher.SimpleExpr{Includes: []string{"+"}}},
+ HTTP: web.HTTP{Request: web.Request{URL: "http://127.0.0.1:8080/metrics"}},
+ TopicFilter: matcher.SimpleExpr{Includes: []string{"+"}}},
wantFail: true,
},
"empty URL": {
@@ -71,9 +80,9 @@ func TestPulsar_Init(t *testing.T) {
pulsar.Config = test.config
if test.wantFail {
- assert.False(t, pulsar.Init())
+ assert.Error(t, pulsar.Init())
} else {
- assert.True(t, pulsar.Init())
+ assert.NoError(t, pulsar.Init())
}
})
}
@@ -102,9 +111,9 @@ func TestPulsar_Check(t *testing.T) {
defer srv.Close()
if test.wantFail {
- assert.False(t, pulsar.Check())
+ assert.Error(t, pulsar.Check())
} else {
- assert.True(t, pulsar.Check())
+ assert.NoError(t, pulsar.Check())
}
})
}
@@ -220,12 +229,12 @@ func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Serv
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsStdV250Namespaces)
+ _, _ = w.Write(dataVer250Namespaces)
}))
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -234,12 +243,12 @@ func prepareClientServerStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server)
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsStdV250Topics)
+ _, _ = w.Write(dataVer250Topics)
}))
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -258,16 +267,16 @@ func prepareClientServersDynamicStdV250Topics(t *testing.T) (*Pulsar, *httptest.
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
if i%2 == 0 {
- _, _ = w.Write(metricsStdV250Topics)
+ _, _ = w.Write(dataVer250Topics)
} else {
- _, _ = w.Write(metricsStdV250Topics2)
+ _, _ = w.Write(dataVer250Topics2)
}
i++
}))
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -276,12 +285,12 @@ func prepareClientServerNonPulsar(t *testing.T) (*Pulsar, *httptest.Server) {
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsNonPulsar)
+ _, _ = w.Write(dataNonPulsarMetrics)
}))
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -295,7 +304,7 @@ func prepareClientServerInvalidData(t *testing.T) (*Pulsar, *httptest.Server) {
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -309,7 +318,7 @@ func prepareClientServer404(t *testing.T) (*Pulsar, *httptest.Server) {
pulsar := New()
pulsar.URL = srv.URL
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
@@ -320,7 +329,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*Pulsar, *httptest.Serv
pulsar := New()
pulsar.URL = "http://127.0.0.1:38001/metrics"
- require.True(t, pulsar.Init())
+ require.NoError(t, pulsar.Init())
return pulsar, srv
}
diff --git a/modules/pulsar/testdata/config.json b/modules/pulsar/testdata/config.json
new file mode 100644
index 000000000..ab4f38fe0
--- /dev/null
+++ b/modules/pulsar/testdata/config.json
@@ -0,0 +1,28 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "topic_filter": {
+ "includes": [
+ "ok"
+ ],
+ "excludes": [
+ "ok"
+ ]
+ }
+}
diff --git a/modules/pulsar/testdata/config.yaml b/modules/pulsar/testdata/config.yaml
new file mode 100644
index 000000000..f2645d9e9
--- /dev/null
+++ b/modules/pulsar/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+topic_filter:
+ includes:
+ - "ok"
+ excludes:
+ - "ok"
diff --git a/modules/rabbitmq/config_schema.json b/modules/rabbitmq/config_schema.json
index ad9f0e7b0..b486716f1 100644
--- a/modules/rabbitmq/config_schema.json
+++ b/modules/rabbitmq/config_schema.json
@@ -1,62 +1,159 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/rabbitmq job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "RabbitMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the RabbitMQ management API.",
+ "type": "string",
+ "default": "https://127.0.0.1"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "collect_queues_metrics": {
+ "title": "Collect Queues Metrics",
+ "description": "Collect stats for each queue of each virtual host. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used.",
+ "type": "boolean",
+ "default": false
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects",
+ "collect_queues_metrics"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
]
},
- "collect_queues_metrics": {
- "type": "boolean"
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/rabbitmq/rabbitmq.go b/modules/rabbitmq/rabbitmq.go
index 59fe4b153..b0766458f 100644
--- a/modules/rabbitmq/rabbitmq.go
+++ b/modules/rabbitmq/rabbitmq.go
@@ -4,6 +4,7 @@ package rabbitmq
import (
_ "embed"
+ "errors"
"net/http"
"time"
@@ -31,7 +32,7 @@ func New() *RabbitMQ {
Password: "guest",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
CollectQueues: false,
@@ -43,50 +44,62 @@ func New() *RabbitMQ {
}
type Config struct {
- web.HTTP `yaml:",inline"`
- CollectQueues bool `yaml:"collect_queues_metrics"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ CollectQueues bool `yaml:"collect_queues_metrics" json:"collect_queues_metrics"`
}
type (
RabbitMQ struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
httpClient *http.Client
nodeName string
-
- vhosts map[string]bool
- queues map[string]queueCache
+ vhosts map[string]bool
+ queues map[string]queueCache
}
queueCache struct {
name, vhost string
}
)
-func (r *RabbitMQ) Init() bool {
+func (r *RabbitMQ) Configuration() any {
+ return r.Config
+}
+
+func (r *RabbitMQ) Init() error {
if r.URL == "" {
r.Error("'url' can not be empty")
- return false
+ return errors.New("url not set")
}
client, err := web.NewHTTPClient(r.Client)
if err != nil {
r.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
r.httpClient = client
r.Debugf("using URL %s", r.URL)
- r.Debugf("using timeout: %s", r.Timeout.Duration)
+ r.Debugf("using timeout: %s", r.Timeout)
- return true
+ return nil
}
-func (r *RabbitMQ) Check() bool {
- return len(r.Collect()) > 0
+func (r *RabbitMQ) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (r *RabbitMQ) Charts() *module.Charts {
diff --git a/modules/rabbitmq/rabbitmq_test.go b/modules/rabbitmq/rabbitmq_test.go
index c365726aa..16ca4505f 100644
--- a/modules/rabbitmq/rabbitmq_test.go
+++ b/modules/rabbitmq/rabbitmq_test.go
@@ -9,30 +9,40 @@ import (
"path/filepath"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
-
- "github.com/netdata/go.d.plugin/pkg/web"
)
var (
- testOverviewStats, _ = os.ReadFile("testdata/v3.11.5/api-overview.json")
- testNodeStats, _ = os.ReadFile("testdata/v3.11.5/api-nodes-node.json")
- testVhostsStats, _ = os.ReadFile("testdata/v3.11.5/api-vhosts.json")
- testQueuesStats, _ = os.ReadFile("testdata/v3.11.5/api-queues.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataOverviewStats, _ = os.ReadFile("testdata/v3.11.5/api-overview.json")
+ dataNodeStats, _ = os.ReadFile("testdata/v3.11.5/api-nodes-node.json")
+ dataVhostsStats, _ = os.ReadFile("testdata/v3.11.5/api-vhosts.json")
+ dataQueuesStats, _ = os.ReadFile("testdata/v3.11.5/api-queues.json")
)
func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "testOverviewStats": testOverviewStats,
- "testNodeStats": testNodeStats,
- "testVhostsStats": testVhostsStats,
- "testQueuesStats": testQueuesStats,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataOverviewStats": dataOverviewStats,
+ "dataNodeStats": dataNodeStats,
+ "dataVhostsStats": dataVhostsStats,
+ "dataQueuesStats": dataQueuesStats,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
+func TestRabbitMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &RabbitMQ{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestRabbitMQ_Init(t *testing.T) {
tests := map[string]struct {
wantFail bool
@@ -58,9 +68,9 @@ func TestRabbitMQ_Init(t *testing.T) {
rabbit.Config = test.config
if test.wantFail {
- assert.False(t, rabbit.Init())
+ assert.Error(t, rabbit.Init())
} else {
- assert.True(t, rabbit.Init())
+ assert.NoError(t, rabbit.Init())
}
})
}
@@ -74,7 +84,7 @@ func TestRabbitMQ_Cleanup(t *testing.T) {
assert.NotPanics(t, New().Cleanup)
rabbit := New()
- require.True(t, rabbit.Init())
+ require.NoError(t, rabbit.Init())
assert.NotPanics(t, rabbit.Cleanup)
}
@@ -94,12 +104,12 @@ func TestRabbitMQ_Check(t *testing.T) {
rabbit, cleanup := test.prepare()
defer cleanup()
- require.True(t, rabbit.Init())
+ require.NoError(t, rabbit.Init())
if test.wantFail {
- assert.False(t, rabbit.Check())
+ assert.Error(t, rabbit.Check())
} else {
- assert.True(t, rabbit.Check())
+ assert.NoError(t, rabbit.Check())
}
})
}
@@ -285,7 +295,7 @@ func TestRabbitMQ_Collect(t *testing.T) {
rabbit, cleanup := test.prepare()
defer cleanup()
- require.True(t, rabbit.Init())
+ require.NoError(t, rabbit.Init())
mx := rabbit.Collect()
@@ -332,13 +342,13 @@ func prepareRabbitMQEndpoint() *httptest.Server {
func(w http.ResponseWriter, r *http.Request) {
switch r.URL.Path {
case urlPathAPIOverview:
- _, _ = w.Write(testOverviewStats)
+ _, _ = w.Write(dataOverviewStats)
case filepath.Join(urlPathAPINodes, "rabbit@localhost"):
- _, _ = w.Write(testNodeStats)
+ _, _ = w.Write(dataNodeStats)
case urlPathAPIVhosts:
- _, _ = w.Write(testVhostsStats)
+ _, _ = w.Write(dataVhostsStats)
case urlPathAPIQueues:
- _, _ = w.Write(testQueuesStats)
+ _, _ = w.Write(dataQueuesStats)
default:
w.WriteHeader(404)
}
diff --git a/modules/rabbitmq/testdata/config.json b/modules/rabbitmq/testdata/config.json
new file mode 100644
index 000000000..b3f637f06
--- /dev/null
+++ b/modules/rabbitmq/testdata/config.json
@@ -0,0 +1,21 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "collect_queues_metrics": true
+}
diff --git a/modules/rabbitmq/testdata/config.yaml b/modules/rabbitmq/testdata/config.yaml
new file mode 100644
index 000000000..12bb79bec
--- /dev/null
+++ b/modules/rabbitmq/testdata/config.yaml
@@ -0,0 +1,18 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+collect_queues_metrics: yes
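
One detail in these fixtures: the YAML files spell booleans as yes where the JSON twins use true. YAML 1.1 decoders such as gopkg.in/yaml.v2 (assumed to be the decoder in play here) treat the two identically, as a quick demonstration shows:

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type cfg struct {
	NotFollowRedirects   bool `yaml:"not_follow_redirects"`
	CollectQueuesMetrics bool `yaml:"collect_queues_metrics"`
}

func main() {
	src := []byte("not_follow_redirects: yes\ncollect_queues_metrics: yes\n")
	var c cfg
	if err := yaml.Unmarshal(src, &c); err != nil {
		panic(err)
	}
	// YAML 1.1 "yes"/"no" decode as true/false.
	fmt.Printf("%+v\n", c) // {NotFollowRedirects:true CollectQueuesMetrics:true}
}
```
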
diff --git a/modules/redis/config_schema.json b/modules/redis/config_schema.json
index ed25da9de..771a851d8 100644
--- a/modules/redis/config_schema.json
+++ b/modules/redis/config_schema.json
@@ -1,44 +1,112 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/redis job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Redis collector configuration.",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Redis URL",
+ "description": "The URL specifying the connection details for the Redis server.",
+ "type": "string",
+ "default": "redis://@localhost:9221"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection, read, and write timeout duration in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "ping_samples": {
+ "title": "Ping samples",
+ "description": "The number of PING commands to send per data collection interval. Used to calculate latency.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "address": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "username": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "ping_samples": {
- "type": "integer"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "tls_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout",
+ "ping_samples"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/redis/init.go b/modules/redis/init.go
index ffed274c3..072febb17 100644
--- a/modules/redis/init.go
+++ b/modules/redis/init.go
@@ -42,9 +42,9 @@ func (r *Redis) initRedisClient() (*redis.Client, error) {
opts.PoolSize = 1
opts.TLSConfig = tlsConfig
- opts.DialTimeout = r.Timeout.Duration
- opts.ReadTimeout = r.Timeout.Duration
- opts.WriteTimeout = r.Timeout.Duration
+ opts.DialTimeout = r.Timeout.Duration()
+ opts.ReadTimeout = r.Timeout.Duration()
+ opts.WriteTimeout = r.Timeout.Duration()
return redis.NewClient(opts), nil
}
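
As with ProxySQL, the single configured timeout now feeds dial, read, and write deadlines through the Duration() accessor. A sketch using go-redis v8, mirroring the options set in init.go; the URL and timeout value are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
)

func main() {
	opts, err := redis.ParseURL("redis://@localhost:6379")
	if err != nil {
		panic(err)
	}

	timeout := time.Second // stands in for r.Timeout.Duration()
	opts.PoolSize = 1
	opts.DialTimeout = timeout
	opts.ReadTimeout = timeout
	opts.WriteTimeout = timeout

	rdb := redis.NewClient(opts)
	defer rdb.Close()

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	fmt.Println(rdb.Ping(ctx).Err()) // nil if a local redis is running
}
```
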
diff --git a/modules/redis/redis.go b/modules/redis/redis.go
index 2117cc2ce..6d9836ce5 100644
--- a/modules/redis/redis.go
+++ b/modules/redis/redis.go
@@ -5,6 +5,7 @@ package redis
import (
"context"
_ "embed"
+ "errors"
"sync"
"time"
@@ -31,7 +32,7 @@ func New() *Redis {
return &Redis{
Config: Config{
Address: "redis://@localhost:6379",
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
PingSamples: 5,
},
@@ -44,31 +45,29 @@ func New() *Redis {
}
type Config struct {
- Address string `yaml:"address"`
- Password string `yaml:"password"`
- Username string `yaml:"username"`
- Timeout web.Duration `yaml:"timeout"`
- PingSamples int `yaml:"ping_samples"`
- tlscfg.TLSConfig `yaml:",inline"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Username string `yaml:"username" json:"username"`
+ Password string `yaml:"password" json:"password"`
+ PingSamples int `yaml:"ping_samples" json:"ping_samples"`
}
type (
Redis struct {
module.Base
- Config `yaml:",inline"`
-
- charts *module.Charts
-
- rdb redisClient
-
- server string
- version *semver.Version
+ Config `yaml:",inline" json:""`
+ charts *module.Charts
addAOFChartsOnce *sync.Once
addReplSlaveChartsOnce *sync.Once
- pingSummary metrics.Summary
+ rdb redisClient
+ server string
+ version *semver.Version
+ pingSummary metrics.Summary
collectedCommands map[string]bool
collectedDbs map[string]bool
}
@@ -79,32 +78,44 @@ type (
}
)
-func (r *Redis) Init() bool {
+func (r *Redis) Configuration() any {
+ return r.Config
+}
+
+func (r *Redis) Init() error {
err := r.validateConfig()
if err != nil {
r.Errorf("config validation: %v", err)
- return false
+ return err
}
rdb, err := r.initRedisClient()
if err != nil {
r.Errorf("init redis client: %v", err)
- return false
+ return err
}
r.rdb = rdb
charts, err := r.initCharts()
if err != nil {
r.Errorf("init charts: %v", err)
- return false
+ return err
}
r.charts = charts
- return true
+ return nil
}
-func (r *Redis) Check() bool {
- return len(r.Collect()) > 0
+func (r *Redis) Check() error {
+ mx, err := r.collect()
+ if err != nil {
+ r.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (r *Redis) Charts() *module.Charts {
diff --git a/modules/redis/redis_test.go b/modules/redis/redis_test.go
index 9ee2f54f0..6528177de 100644
--- a/modules/redis/redis_test.go
+++ b/modules/redis/redis_test.go
@@ -9,6 +9,7 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/go-redis/redis/v8"
@@ -17,21 +18,26 @@ import (
)
var (
- pikaInfoAll, _ = os.ReadFile("testdata/pika/info_all.txt")
- v609InfoAll, _ = os.ReadFile("testdata/v6.0.9/info_all.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataPikaInfoAll, _ = os.ReadFile("testdata/pika/info_all.txt")
+ dataVer609InfoAll, _ = os.ReadFile("testdata/v6.0.9/info_all.txt")
)
-func Test_Testdata(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "pikaInfoAll": pikaInfoAll,
- "v609InfoAll": v609InfoAll,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataPikaInfoAll": dataPikaInfoAll,
+ "dataVer609InfoAll": dataVer609InfoAll,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Redis)(nil), New())
+func TestRedis_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Redis{}, dataConfigJSON, dataConfigYAML)
}
func TestRedis_Init(t *testing.T) {
@@ -65,9 +71,9 @@ func TestRedis_Init(t *testing.T) {
rdb.Config = test.config
if test.wantFail {
- assert.False(t, rdb.Init())
+ assert.Error(t, rdb.Init())
} else {
- assert.True(t, rdb.Init())
+ assert.NoError(t, rdb.Init())
}
})
}
@@ -96,9 +102,9 @@ func TestRedis_Check(t *testing.T) {
rdb := test.prepare(t)
if test.wantFail {
- assert.False(t, rdb.Check())
+ assert.Error(t, rdb.Check())
} else {
- assert.True(t, rdb.Check())
+ assert.NoError(t, rdb.Check())
}
})
}
@@ -106,7 +112,7 @@ func TestRedis_Check(t *testing.T) {
func TestRedis_Charts(t *testing.T) {
rdb := New()
- require.True(t, rdb.Init())
+ require.NoError(t, rdb.Init())
assert.NotNil(t, rdb.Charts())
}
@@ -115,7 +121,7 @@ func TestRedis_Cleanup(t *testing.T) {
rdb := New()
assert.NotPanics(t, rdb.Cleanup)
- require.True(t, rdb.Init())
+ require.NoError(t, rdb.Init())
m := &mockRedisClient{}
rdb.rdb = m
@@ -308,16 +314,16 @@ func TestRedis_Collect(t *testing.T) {
func prepareRedisV609(t *testing.T) *Redis {
rdb := New()
- require.True(t, rdb.Init())
+ require.NoError(t, rdb.Init())
rdb.rdb = &mockRedisClient{
- result: v609InfoAll,
+ result: dataVer609InfoAll,
}
return rdb
}
func prepareRedisErrorOnInfo(t *testing.T) *Redis {
rdb := New()
- require.True(t, rdb.Init())
+ require.NoError(t, rdb.Init())
rdb.rdb = &mockRedisClient{
errOnInfo: true,
}
@@ -326,9 +332,9 @@ func prepareRedisErrorOnInfo(t *testing.T) *Redis {
func prepareRedisWithPikaMetrics(t *testing.T) *Redis {
rdb := New()
- require.True(t, rdb.Init())
+ require.NoError(t, rdb.Init())
rdb.rdb = &mockRedisClient{
- result: pikaInfoAll,
+ result: dataPikaInfoAll,
}
return rdb
}
diff --git a/modules/redis/testdata/config.json b/modules/redis/testdata/config.json
new file mode 100644
index 000000000..050cfa3f4
--- /dev/null
+++ b/modules/redis/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "username": "ok",
+ "password": "ok",
+ "ping_samples": 123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/redis/testdata/config.yaml b/modules/redis/testdata/config.yaml
new file mode 100644
index 000000000..57c5cf7ea
--- /dev/null
+++ b/modules/redis/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+username: "ok"
+password: "ok"
+ping_samples: 123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
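
The paired `config.json`/`config.yaml` fixtures feed `module.TestConfigurationSerialize`, which, judging by its name and call sites, verifies that a module's `Config` decodes identically from both encodings. A standalone sketch of that round-trip idea, using a local mirror of a few redis fields (field names come from the fixtures above; the real helper lives in `agent/module`):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v2" // assumed YAML package; the helper's actual library may differ
)

// Local mirror of a few fields from the redis Config above.
type cfg struct {
	UpdateEvery int    `yaml:"update_every" json:"update_every"`
	Address     string `yaml:"address" json:"address"`
	PingSamples int    `yaml:"ping_samples" json:"ping_samples"`
}

func main() {
	yamlSrc := []byte("update_every: 123\naddress: ok\nping_samples: 123\n")
	jsonSrc := []byte(`{"update_every":123,"address":"ok","ping_samples":123}`)

	var fromYAML, fromJSON cfg
	if err := yaml.Unmarshal(yamlSrc, &fromYAML); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(jsonSrc, &fromJSON); err != nil {
		panic(err)
	}
	// Both decodings must yield the same effective configuration.
	fmt.Println(fromYAML == fromJSON) // true
}
```
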
diff --git a/modules/scaleio/collect_sdc.go b/modules/scaleio/collect_sdc.go
index 495b1a031..be05f5c33 100644
--- a/modules/scaleio/collect_sdc.go
+++ b/modules/scaleio/collect_sdc.go
@@ -4,7 +4,7 @@ package scaleio
import "github.com/netdata/go.d.plugin/modules/scaleio/client"
-func (s ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics {
+func (s *ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics {
ms := make(map[string]sdcMetrics, len(ss))
for id, stats := range ss {
diff --git a/modules/scaleio/collect_storage_pool.go b/modules/scaleio/collect_storage_pool.go
index 7a41b66bd..dcaf01950 100644
--- a/modules/scaleio/collect_storage_pool.go
+++ b/modules/scaleio/collect_storage_pool.go
@@ -4,7 +4,7 @@ package scaleio
import "github.com/netdata/go.d.plugin/modules/scaleio/client"
-func (s ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics {
+func (s *ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics {
ms := make(map[string]storagePoolMetrics, len(ss))
for id, stats := range ss {
diff --git a/modules/scaleio/collect_system.go b/modules/scaleio/collect_system.go
index 6806e1969..e28fcee6c 100644
--- a/modules/scaleio/collect_system.go
+++ b/modules/scaleio/collect_system.go
@@ -4,7 +4,7 @@ package scaleio
import "github.com/netdata/go.d.plugin/modules/scaleio/client"
-func (ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics {
+func (s *ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics {
var sm systemMetrics
collectSystemCapacity(&sm, ss)
collectSystemWorkload(&sm, ss)
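
The ScaleIO collectors (and the SNMP helpers later in this patch) switch from value receivers to pointer receivers. A value receiver copies the struct on every call, so any mutation is silently lost and the type ends up with a mixed method set; a small illustration of the difference:

```go
package main

import "fmt"

type counter struct{ n int }

// Value receiver: operates on a copy, so the increment is lost.
func (c counter) incByValue() { c.n++ }

// Pointer receiver: operates on the caller's value.
func (c *counter) incByPointer() { c.n++ }

func main() {
	var c counter
	c.incByValue()
	c.incByPointer()
	fmt.Println(c.n) // 1, not 2
}
```
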
diff --git a/modules/scaleio/config_schema.json b/modules/scaleio/config_schema.json
index 66230acc9..41315b3b9 100644
--- a/modules/scaleio/config_schema.json
+++ b/modules/scaleio/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/scaleio job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "ScaleIO collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the VxFlex OS Gateway API.",
+ "type": "string",
+ "default": "http://127.0.0.1/stub_status"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/scaleio/scaleio.go b/modules/scaleio/scaleio.go
index 05bb03c5b..6b0a0cb53 100644
--- a/modules/scaleio/scaleio.go
+++ b/modules/scaleio/scaleio.go
@@ -4,12 +4,12 @@ package scaleio
import (
_ "embed"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/scaleio/client"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -22,40 +22,39 @@ func init() {
})
}
-// New creates ScaleIO with default values.
func New() *ScaleIO {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "https://127.0.0.1",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &ScaleIO{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "https://127.0.0.1",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
- return &ScaleIO{
- Config: config,
charts: systemCharts.Copy(),
charted: make(map[string]bool),
}
}
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
type (
- // Config is the ScaleIO module configuration.
- Config struct {
- web.HTTP `yaml:",inline"`
- }
- // ScaleIO ScaleIO module.
ScaleIO struct {
module.Base
- Config `yaml:",inline"`
- client *client.Client
+ Config `yaml:",inline" json:""`
+
charts *module.Charts
- discovered instances
- charted map[string]bool
+ client *client.Client
+ discovered instances
+ charted map[string]bool
lastDiscoveryOK bool
runs int
}
@@ -65,40 +64,49 @@ type (
}
)
-// Init makes initialization.
-func (s *ScaleIO) Init() bool {
+func (s *ScaleIO) Configuration() any {
+ return s.Config
+}
+
+func (s *ScaleIO) Init() error {
if s.Username == "" || s.Password == "" {
s.Error("username and password aren't set")
- return false
+ return errors.New("username and password aren't set")
}
c, err := client.New(s.Client, s.Request)
if err != nil {
s.Errorf("error on creating ScaleIO client: %v", err)
- return false
+ return err
}
s.client = c
s.Debugf("using URL %s", s.URL)
- s.Debugf("using timeout: %s", s.Timeout.Duration)
- return true
+ s.Debugf("using timeout: %s", s.Timeout)
+
+ return nil
}
-// Check makes check.
-func (s *ScaleIO) Check() bool {
+func (s *ScaleIO) Check() error {
if err := s.client.Login(); err != nil {
s.Error(err)
- return false
+ return err
+ }
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
- return len(s.Collect()) > 0
+ return nil
}
-// Charts returns Charts.
func (s *ScaleIO) Charts() *module.Charts {
return s.charts
}
-// Collect collects metrics.
func (s *ScaleIO) Collect() map[string]int64 {
mx, err := s.collect()
if err != nil {
@@ -112,7 +120,6 @@ func (s *ScaleIO) Collect() map[string]int64 {
return mx
}
-// Cleanup makes cleanup.
func (s *ScaleIO) Cleanup() {
if s.client == nil {
return
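
For reference, a job definition consistent with the schema above and the defaults set in `New()`; all values except `url` are illustrative, and `username`/`password` are the fields `Init()` requires:

```yaml
jobs:
  - name: scaleio
    url: https://127.0.0.1
    username: admin   # required by Init()
    password: secret  # required by Init()
    timeout: 1
    tls_skip_verify: yes
```
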
diff --git a/modules/scaleio/scaleio_test.go b/modules/scaleio/scaleio_test.go
index 5547b174b..e9bc84cf7 100644
--- a/modules/scaleio/scaleio_test.go
+++ b/modules/scaleio/scaleio_test.go
@@ -8,25 +8,34 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/scaleio/client"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- selectedStatisticsData, _ = os.ReadFile("testdata/selected_statistics.json")
- instancesData, _ = os.ReadFile("testdata/instances.json")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataSelectedStatistics, _ = os.ReadFile("testdata/selected_statistics.json")
+ dataInstances, _ = os.ReadFile("testdata/instances.json")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, selectedStatisticsData)
- assert.NotNil(t, instancesData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataSelectedStatistics": dataSelectedStatistics,
+ "dataInstances": dataInstances,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestScaleIO_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &ScaleIO{}, dataConfigJSON, dataConfigYAML)
}
func TestScaleIO_Init(t *testing.T) {
@@ -34,10 +43,10 @@ func TestScaleIO_Init(t *testing.T) {
scaleIO.Username = "username"
scaleIO.Password = "password"
- assert.True(t, scaleIO.Init())
+ assert.NoError(t, scaleIO.Init())
}
func TestScaleIO_Init_UsernameAndPasswordNotSet(t *testing.T) {
- assert.False(t, New().Init())
+ assert.Error(t, New().Init())
}
func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) {
@@ -46,24 +55,24 @@ func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) {
job.Password = "password"
job.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestScaleIO_Check(t *testing.T) {
srv, _, scaleIO := prepareSrvMockScaleIO(t)
defer srv.Close()
- require.True(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Init())
- assert.True(t, scaleIO.Check())
+ assert.NoError(t, scaleIO.Check())
}
func TestScaleIO_Check_ErrorOnLogin(t *testing.T) {
srv, mock, scaleIO := prepareSrvMockScaleIO(t)
defer srv.Close()
- require.True(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Init())
mock.Password = "new password"
- assert.False(t, scaleIO.Check())
+ assert.Error(t, scaleIO.Check())
}
func TestScaleIO_Charts(t *testing.T) {
@@ -73,8 +82,8 @@ func TestScaleIO_Charts(t *testing.T) {
func TestScaleIO_Cleanup(t *testing.T) {
srv, _, scaleIO := prepareSrvMockScaleIO(t)
defer srv.Close()
- require.True(t, scaleIO.Init())
- require.True(t, scaleIO.Check())
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
scaleIO.Cleanup()
assert.False(t, scaleIO.client.LoggedIn())
@@ -83,8 +92,8 @@ func TestScaleIO_Cleanup(t *testing.T) {
func TestScaleIO_Collect(t *testing.T) {
srv, _, scaleIO := prepareSrvMockScaleIO(t)
defer srv.Close()
- require.True(t, scaleIO.Init())
- require.True(t, scaleIO.Check())
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
expected := map[string]int64{
"sdc_6076fd0f00000000_bandwidth_read": 0,
@@ -297,8 +306,8 @@ func TestScaleIO_Collect(t *testing.T) {
func TestScaleIO_Collect_ConnectionRefused(t *testing.T) {
srv, _, scaleIO := prepareSrvMockScaleIO(t)
defer srv.Close()
- require.True(t, scaleIO.Init())
- require.True(t, scaleIO.Check())
+ require.NoError(t, scaleIO.Init())
+ require.NoError(t, scaleIO.Check())
scaleIO.client.Request.URL = "http://127.0.0.1:38001"
assert.Nil(t, scaleIO.Collect())
@@ -349,11 +358,11 @@ func prepareSrvMockScaleIO(t *testing.T) (*httptest.Server, *client.MockScaleIOA
token = "token"
)
var stats client.SelectedStatistics
- err := json.Unmarshal(selectedStatisticsData, &stats)
+ err := json.Unmarshal(dataSelectedStatistics, &stats)
require.NoError(t, err)
var ins client.Instances
- err = json.Unmarshal(instancesData, &ins)
+ err = json.Unmarshal(dataInstances, &ins)
require.NoError(t, err)
mock := client.MockScaleIOAPIServer{
diff --git a/modules/scaleio/testdata/config.json b/modules/scaleio/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/scaleio/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/scaleio/testdata/config.yaml b/modules/scaleio/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/scaleio/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/snmp/config_schema.json b/modules/snmp/config_schema.json
index dd4e9c3ca..5c366256e 100644
--- a/modules/snmp/config_schema.json
+++ b/modules/snmp/config_schema.json
@@ -1,188 +1,338 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "update_every": {
- "type": "integer"
- },
- "hostname": {
- "type": "string"
- },
- "community": {
- "type": "string"
- },
- "user": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "level": {
- "type": "string",
- "enum": [
- "none",
- "authNoPriv",
- "authPriv"
- ]
- },
- "auth_proto": {
- "type": "string",
- "enum": [
- "none",
- "md5",
- "sha",
- "sha224",
- "sha256",
- "sha384",
- "sha512"
- ]
- },
- "auth_key": {
- "type": "string"
- },
- "priv_proto": {
- "type": "string",
- "enum": [
- "none",
- "des",
- "aes",
- "aes192",
- "aes256",
- "aes192c"
- ]
- },
- "priv_key": {
- "type": "string"
- }
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
},
- "required": [
- "name",
- "level",
- "auth_proto",
- "auth_key",
- "priv_proto",
- "priv_key"
- ]
- },
- "options": {
- "type": "object",
- "properties": {
- "port": {
- "type": "integer"
- },
- "retries": {
- "type": "integer"
- },
- "timeout": {
- "type": "integer"
- },
- "version": {
- "type": "string",
- "enum": [
- "1",
- "2",
- "3"
- ]
- },
- "max_request_size": {
- "type": "integer"
- }
+ "hostname": {
+ "title": "Hostname",
+ "type": "string"
},
- "required": [
- "port",
- "retries",
- "timeout",
- "version",
- "max_request_size"
- ]
- },
- "charts": {
- "type": "array",
- "items": {
+ "community": {
+ "title": "SNMPv1/2 community",
+ "type": "string",
+ "default": "public"
+ },
+ "options": {
+ "title": "Options",
"type": "object",
"properties": {
- "id": {
- "type": "string"
+ "version": {
+ "title": "SNMP version",
+ "type": "string",
+ "enum": [
+ "1",
+ "2c",
+ "3"
+ ],
+ "default": "2c"
},
- "title": {
- "type": "string"
+ "port": {
+ "title": "Port",
+ "description": "",
+ "type": "integer",
+ "exclusiveMinimum": 0,
+ "default": 161
},
- "units": {
- "type": "string"
+ "retries": {
+ "title": "Retries",
+ "description": "Retries to attempt.",
+ "type": "integer",
+ "minimum": 0,
+ "default": 161
},
- "family": {
- "type": "string"
+ "timeout": {
+ "title": "Timeout",
+ "description": "SNMP request/response timeout.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
},
- "type": {
+ "max_request_size": {
+ "title": "Max OIDs in request",
+ "description": "Maximum number of OIDs allowed in one one SNMP request.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 60
+ }
+ },
+ "required": [
+ "version",
+ "port",
+ "retries",
+ "timeout",
+ "max_request_size"
+ ]
+ },
+ "user": {
+ "title": "SNMPv3 configuration",
+ "type": "object",
+ "properties": {
+ "name": {
+ "title": "Username",
"type": "string"
},
- "priority": {
- "type": "integer"
+ "level": {
+ "title": "Security level",
+ "description": "Controls the security aspects of SNMPv3 communication, including authentication and encryption.",
+ "type": "string",
+ "enum": [
+ "none",
+ "authNoPriv",
+ "authPriv"
+ ],
+ "default": "authPriv"
},
- "multiply_range": {
- "type": "array",
- "items": {
- "type": "integer"
- }
+ "auth_proto": {
+ "title": "Authentication protocol",
+ "type": "string",
+ "enum": [
+ "none",
+ "md5",
+ "sha",
+ "sha224",
+ "sha256",
+ "sha384",
+ "sha512"
+ ],
+ "default": "sha512"
},
- "dimensions": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "oid": {
- "type": "string"
- },
- "name": {
- "type": "string"
- },
- "algorithm": {
- "type": "string",
- "enum": [
- "absolute",
- "incremental"
- ]
- },
- "multiplier": {
- "type": "integer"
+ "auth_key": {
+ "title": "Authentication passphrase",
+ "type": "string"
+ },
+ "priv_proto": {
+ "title": "Privacy protocol",
+ "type": "string",
+ "enum": [
+ "none",
+ "des",
+ "aes",
+ "aes192",
+ "aes256",
+ "aes192c"
+ ],
+ "default": "aes192c"
+ },
+ "priv_key": {
+ "title": "Privacy passphrase",
+ "type": "string"
+ }
+ }
+ },
+ "charts": {
+ "title": "Charts configuration",
+ "type": "array",
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Chart",
+ "type": "object",
+ "properties": {
+ "id": {
+ "title": "ID",
+ "description": "Unique identifier for the chart.",
+ "type": "string"
+ },
+ "title": {
+ "title": "Title",
+ "description": "Title of the chart.",
+ "type": "string"
+ },
+ "units": {
+ "title": "Units",
+ "description": "Unit label for the vertical axis on charts.",
+ "type": "string"
+ },
+ "family": {
+ "title": "Family",
+ "description": "Subsection on the dashboard where the chart will be displayed.",
+ "type": "string"
+ },
+ "type": {
+ "title": "Type",
+ "type": "string",
+ "enum": [
+ "line",
+ "area",
+ "stacked"
+ ],
+ "default": "line"
+ },
+ "priority": {
+ "title": "Priority",
+ "description": "Rendering priority of the chart on the dashboard. Lower priority values will cause the chart to appear before those with higher priority values.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 90000
+ },
+ "multiply_range": {
+ "type": "array",
+ "items": {
+ "type": "integer"
+ }
+ },
+ "dimensions": {
+ "title": "Dimensions",
+ "description": "Configuration for dimensions of the chart.",
+ "type": "array",
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Dimension configuration",
+ "type": "object",
+ "properties": {
+ "oid": {
+ "title": "OID",
+ "description": "SNMP OID.",
+ "type": "string"
+ },
+ "name": {
+ "title": "Dimension",
+ "description": "Name of the dimension.",
+ "type": "string"
+ },
+ "algorithm": {
+ "title": "Algorithm",
+ "description": "Algorithm of the dimension.",
+ "type": "string",
+ "enum": [
+ "absolute",
+ "incremental"
+ ],
+ "default": "absolute"
+ },
+ "multiplier": {
+ "title": "Multiplier",
+ "description": "Value to multiply the collected value.",
+ "type": "integer",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ },
+ "divisor": {
+ "title": "Divisor",
+ "description": "Value to divide the collected value.",
+ "type": "integer",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ }
},
- "divisor": {
- "type": "integer"
- }
- },
- "required": [
- "oid",
- "name",
- "algorithm",
- "multiplier",
- "divisor"
- ]
+ "required": [
+ "oid",
+ "name",
+ "algorithm",
+ "multiplier",
+ "divisor"
+ ]
+ }
}
+ },
+ "required": [
+ "id",
+ "title",
+ "units",
+ "family",
+ "type",
+ "priority",
+ "dimensions"
+ ]
+ }
+ }
+ },
+ "required": [
+ "hostname",
+ "community",
+ "options",
+ "charts"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "options": {
+ "version": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ },
+ "user": {
+ "level": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "auth_proto": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "priv_proto": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ },
+ "charts": {
+ "items": {
+ "type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
}
},
- "required": [
- "id",
- "title",
- "units",
- "family",
- "type",
- "priority",
- "multiply_range",
- "dimensions"
- ]
+ "dimensions": {
+ "items": {
+ "algorithm": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ }
+ }
+ }
}
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "hostname",
+ "community",
+ "options"
+ ]
+ },
+ {
+ "title": "SNMPv3",
+ "fields": [
+ "user"
+ ]
+ },
+ {
+ "title": "Charts",
+ "fields": [
+ "charts"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "update_every",
- "hostname",
- "community",
- "user",
- "options",
- "charts"
- ]
+ }
}
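
A job definition satisfying the schema's required fields (`hostname`, `community`, `options`, `charts`); the OID shown is the standard `ifInOctets.1`, and the chart metadata is illustrative:

```yaml
jobs:
  - name: switch
    update_every: 10
    hostname: 192.0.2.1
    community: public
    options:
      version: 2c
      port: 161
      retries: 1
      timeout: 10
      max_request_size: 60
    charts:
      - id: if1_traffic
        title: Interface Traffic
        units: kilobits/s
        family: ports
        type: area
        priority: 90000
        dimensions:
          - oid: 1.3.6.1.2.1.2.2.1.10.1
            name: received
            algorithm: incremental
            multiplier: 8
            divisor: 1000
```
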
diff --git a/modules/snmp/init.go b/modules/snmp/init.go
index 802430936..5802d6682 100644
--- a/modules/snmp/init.go
+++ b/modules/snmp/init.go
@@ -12,7 +12,7 @@ import (
var newSNMPClient = gosnmp.NewHandler
-func (s SNMP) validateConfig() error {
+func (s *SNMP) validateConfig() error {
if len(s.ChartsInput) == 0 {
return errors.New("'charts' are required but not set")
}
@@ -35,7 +35,7 @@ func (s SNMP) validateConfig() error {
return nil
}
-func (s SNMP) initSNMPClient() (gosnmp.Handler, error) {
+func (s *SNMP) initSNMPClient() (gosnmp.Handler, error) {
client := newSNMPClient()
if client.SetTarget(s.Hostname); client.Target() == "" {
@@ -96,7 +96,7 @@ func (s SNMP) initSNMPClient() (gosnmp.Handler, error) {
return client, nil
}
-func (s SNMP) initOIDs() (oids []string) {
+func (s *SNMP) initOIDs() (oids []string) {
for _, c := range *s.charts {
for _, d := range c.Dims {
oids = append(oids, d.ID)
diff --git a/modules/snmp/snmp.go b/modules/snmp/snmp.go
index 7aa933f64..28177d9c9 100644
--- a/modules/snmp/snmp.go
+++ b/modules/snmp/snmp.go
@@ -4,6 +4,7 @@ package snmp
import (
_ "embed"
+ "errors"
"fmt"
"strings"
@@ -12,17 +13,6 @@ import (
"github.com/gosnmp/gosnmp"
)
-const (
- defaultUpdateEvery = 10
- defaultHostname = "127.0.0.1"
- defaultCommunity = "public"
- defaultVersion = gosnmp.Version2c
- defaultPort = 161
- defaultRetries = 1
- defaultTimeout = defaultUpdateEvery
- defaultMaxOIDs = 60
-)
-
//go:embed "config_schema.json"
var configSchema string
@@ -36,6 +26,17 @@ func init() {
})
}
+const (
+ defaultUpdateEvery = 10
+ defaultHostname = "127.0.0.1"
+ defaultCommunity = "public"
+ defaultVersion = gosnmp.Version2c
+ defaultPort = 161
+ defaultRetries = 1
+ defaultTimeout = defaultUpdateEvery
+ defaultMaxOIDs = 60
+)
+
func New() *SNMP {
return &SNMP{
Config: Config{
@@ -48,73 +49,87 @@ func New() *SNMP {
Version: defaultVersion.String(),
MaxOIDs: defaultMaxOIDs,
},
+ User: User{
+ Name: "",
+ SecurityLevel: "authPriv",
+ AuthProto: "sha512",
+ AuthKey: "",
+ PrivProto: "aes192c",
+ PrivKey: "",
+ },
},
}
}
type (
Config struct {
- UpdateEvery int `yaml:"update_every"`
- Hostname string `yaml:"hostname"`
- Community string `yaml:"community"`
- User User `yaml:"user"`
- Options Options `yaml:"options"`
- ChartsInput []ChartConfig `yaml:"charts"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Hostname string `yaml:"hostname" json:"hostname"`
+ Community string `yaml:"community" json:"community"`
+ User User `yaml:"user" json:"user"`
+ Options Options `yaml:"options" json:"options"`
+ ChartsInput []ChartConfig `yaml:"charts" json:"charts"`
}
User struct {
- Name string `yaml:"name"`
- SecurityLevel string `yaml:"level"`
- AuthProto string `yaml:"auth_proto"`
- AuthKey string `yaml:"auth_key"`
- PrivProto string `yaml:"priv_proto"`
- PrivKey string `yaml:"priv_key"`
+ Name string `yaml:"name" json:"name"`
+ SecurityLevel string `yaml:"level" json:"level"`
+ AuthProto string `yaml:"auth_proto" json:"auth_proto"`
+ AuthKey string `yaml:"auth_key" json:"auth_key"`
+ PrivProto string `yaml:"priv_proto" json:"priv_proto"`
+ PrivKey string `yaml:"priv_key" json:"priv_key"`
}
Options struct {
- Port int `yaml:"port"`
- Retries int `yaml:"retries"`
- Timeout int `yaml:"timeout"`
- Version string `yaml:"version"`
- MaxOIDs int `yaml:"max_request_size"`
+ Port int `yaml:"port" json:"port"`
+ Retries int `yaml:"retries" json:"retries"`
+ Timeout int `yaml:"timeout" json:"timeout"`
+ Version string `yaml:"version" json:"version"`
+ MaxOIDs int `yaml:"max_request_size" json:"max_request_size"`
}
ChartConfig struct {
- ID string `yaml:"id"`
- Title string `yaml:"title"`
- Units string `yaml:"units"`
- Family string `yaml:"family"`
- Type string `yaml:"type"`
- Priority int `yaml:"priority"`
- IndexRange []int `yaml:"multiply_range"`
- Dimensions []DimensionConfig `yaml:"dimensions"`
+ ID string `yaml:"id" json:"id"`
+ Title string `yaml:"title" json:"title"`
+ Units string `yaml:"units" json:"units"`
+ Family string `yaml:"family" json:"family"`
+ Type string `yaml:"type" json:"type"`
+ Priority int `yaml:"priority" json:"priority"`
+ IndexRange []int `yaml:"multiply_range" json:"multiply_range"`
+ Dimensions []DimensionConfig `yaml:"dimensions" json:"dimensions"`
}
DimensionConfig struct {
- OID string `yaml:"oid"`
- Name string `yaml:"name"`
- Algorithm string `yaml:"algorithm"`
- Multiplier int `yaml:"multiplier"`
- Divisor int `yaml:"divisor"`
+ OID string `yaml:"oid" json:"oid"`
+ Name string `yaml:"name" json:"name"`
+ Algorithm string `yaml:"algorithm" json:"algorithm"`
+ Multiplier int `yaml:"multiplier" json:"multiplier"`
+ Divisor int `yaml:"divisor" json:"divisor"`
}
)
type SNMP struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
- charts *module.Charts
snmpClient gosnmp.Handler
- oids []string
+
+ oids []string
}
-func (s *SNMP) Init() bool {
+func (s *SNMP) Configuration() any {
+ return s.Config
+}
+
+func (s *SNMP) Init() error {
err := s.validateConfig()
if err != nil {
s.Errorf("config validation: %v", err)
- return false
+ return err
}
snmpClient, err := s.initSNMPClient()
if err != nil {
s.Errorf("SNMP client initialization: %v", err)
- return false
+ return err
}
s.Info(snmpClientConnInfo(snmpClient))
@@ -122,24 +137,32 @@ func (s *SNMP) Init() bool {
err = snmpClient.Connect()
if err != nil {
s.Errorf("SNMP client connect: %v", err)
- return false
+ return err
}
s.snmpClient = snmpClient
charts, err := newCharts(s.ChartsInput)
if err != nil {
s.Errorf("Population of charts failed: %v", err)
- return false
+ return err
}
s.charts = charts
s.oids = s.initOIDs()
- return true
+ return nil
}
-func (s *SNMP) Check() bool {
- return len(s.Collect()) > 0
+func (s *SNMP) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (s *SNMP) Charts() *module.Charts {
diff --git a/modules/snmp/snmp_test.go b/modules/snmp/snmp_test.go
index 9f1ef0e90..21fc05530 100644
--- a/modules/snmp/snmp_test.go
+++ b/modules/snmp/snmp_test.go
@@ -5,6 +5,7 @@ package snmp
import (
"errors"
"fmt"
+ "os"
"strings"
"testing"
@@ -16,8 +17,22 @@ import (
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.IsType(t, (*SNMP)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSNMP_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SNMP{}, dataConfigJSON, dataConfigYAML)
}
func TestSNMP_Init(t *testing.T) {
@@ -107,9 +122,9 @@ func TestSNMP_Init(t *testing.T) {
snmp := test.prepareSNMP()
if test.wantFail {
- assert.False(t, snmp.Init())
+ assert.Error(t, snmp.Init())
} else {
- assert.True(t, snmp.Init())
+ assert.NoError(t, snmp.Init())
}
})
}
@@ -209,12 +224,12 @@ func TestSNMP_Check(t *testing.T) {
defaultMockExpects(mockSNMP)
snmp := test.prepareSNMP(mockSNMP)
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
if test.wantFail {
- assert.False(t, snmp.Check())
+ assert.Error(t, snmp.Check())
} else {
- assert.True(t, snmp.Check())
+ assert.NoError(t, snmp.Check())
}
})
}
@@ -311,7 +326,7 @@ func TestSNMP_Collect(t *testing.T) {
defaultMockExpects(mockSNMP)
snmp := test.prepareSNMP(mockSNMP)
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
collected := snmp.Collect()
@@ -328,7 +343,7 @@ func TestSNMP_Cleanup(t *testing.T) {
prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
snmp := New()
snmp.Config = prepareV2Config()
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
m.EXPECT().Close().Times(1)
@@ -339,7 +354,7 @@ func TestSNMP_Cleanup(t *testing.T) {
prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
snmp := New()
snmp.Config = prepareV2Config()
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
snmp.snmpClient = nil
return snmp
@@ -371,7 +386,7 @@ func TestSNMP_Charts(t *testing.T) {
prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
snmp := New()
snmp.Config = prepareV2Config()
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
return snmp
},
@@ -381,7 +396,7 @@ func TestSNMP_Charts(t *testing.T) {
prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP {
snmp := New()
snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 9)
- require.True(t, snmp.Init())
+ require.NoError(t, snmp.Init())
return snmp
},
diff --git a/modules/snmp/testdata/config.json b/modules/snmp/testdata/config.json
new file mode 100644
index 000000000..c0fff4868
--- /dev/null
+++ b/modules/snmp/testdata/config.json
@@ -0,0 +1,42 @@
+{
+ "update_every": 123,
+ "hostname": "ok",
+ "community": "ok",
+ "user": {
+ "name": "ok",
+ "level": "ok",
+ "auth_proto": "ok",
+ "auth_key": "ok",
+ "priv_proto": "ok",
+ "priv_key": "ok"
+ },
+ "options": {
+ "port": 123,
+ "retries": 123,
+ "timeout": 123,
+ "version": "ok",
+ "max_request_size": 123
+ },
+ "charts": [
+ {
+ "id": "ok",
+ "title": "ok",
+ "units": "ok",
+ "family": "ok",
+ "type": "ok",
+ "priority": 123,
+ "multiply_range": [
+ 123
+ ],
+ "dimensions": [
+ {
+ "oid": "ok",
+ "name": "ok",
+ "algorithm": "ok",
+ "multiplier": 123,
+ "divisor": 123
+ }
+ ]
+ }
+ ]
+}
diff --git a/modules/snmp/testdata/config.yaml b/modules/snmp/testdata/config.yaml
new file mode 100644
index 000000000..98620fb9c
--- /dev/null
+++ b/modules/snmp/testdata/config.yaml
@@ -0,0 +1,31 @@
+update_every: 123
+hostname: "ok"
+community: "ok"
+user:
+ name: "ok"
+ level: "ok"
+ auth_proto: "ok"
+ auth_key: "ok"
+ priv_proto: "ok"
+ priv_key: "ok"
+options:
+ port: 123
+ retries: 123
+ timeout: 123
+ version: "ok"
+ max_request_size: 123
+charts:
+ - id: "ok"
+ title: "ok"
+ units: "ok"
+ family: "ok"
+ type: "ok"
+ priority: 123
+ multiply_range:
+ - 123
+ dimensions:
+ - oid: "ok"
+ name: "ok"
+ algorithm: "ok"
+ multiplier: 123
+ divisor: 123
diff --git a/modules/solr/README.md b/modules/solr/README.md
deleted file mode 120000
index 0bca1b31a..000000000
--- a/modules/solr/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/solr.md
\ No newline at end of file
diff --git a/modules/solr/charts.go b/modules/solr/charts.go
deleted file mode 100644
index caaa72489..000000000
--- a/modules/solr/charts.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package solr
-
-import (
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-type (
- // Charts is an alias for module.Charts
- Charts = module.Charts
- // Dims is an alias for module.Dims
- Dims = module.Dims
-)
-
-var charts = Charts{
- {
- ID: "search_requests",
- Title: "Search Requests",
- Units: "requests/s",
- Ctx: "solr.search_requests",
- Dims: Dims{
- {ID: "query_requests_count", Name: "search", Algo: module.Incremental},
- },
- },
- {
- ID: "search_errors",
- Title: "Search Errors",
- Units: "errors/s",
- Ctx: "solr.search_errors",
- Dims: Dims{
- {ID: "query_errors_count", Name: "errors", Algo: module.Incremental},
- },
- },
- {
- ID: "search_errors_by_type",
- Title: "Search Errors By Type",
- Units: "errors/s",
- Ctx: "solr.search_errors_by_type",
- Dims: Dims{
- {ID: "query_clientErrors_count", Name: "client", Algo: module.Incremental},
- {ID: "query_serverErrors_count", Name: "server", Algo: module.Incremental},
- {ID: "query_timeouts_count", Name: "timeouts", Algo: module.Incremental},
- },
- },
- {
- ID: "search_requests_processing_time",
- Title: "Search Requests Processing Time",
- Units: "milliseconds",
- Ctx: "solr.search_requests_processing_time",
- Dims: Dims{
- {ID: "query_totalTime_count", Name: "time", Algo: module.Incremental},
- },
- },
- {
- ID: "search_requests_timings",
- Title: "Search Requests Timings",
- Units: "milliseconds",
- Ctx: "solr.search_requests_timings",
- Dims: Dims{
- {ID: "query_requestTimes_min_ms", Name: "min", Div: 1000000},
- {ID: "query_requestTimes_median_ms", Name: "median", Div: 1000000},
- {ID: "query_requestTimes_mean_ms", Name: "mean", Div: 1000000},
- {ID: "query_requestTimes_max_ms", Name: "max", Div: 1000000},
- },
- },
- {
- ID: "search_requests_processing_time_percentile",
- Title: "Search Requests Processing Time Percentile",
- Units: "milliseconds",
- Ctx: "solr.search_requests_processing_time_percentile",
- Dims: Dims{
- {ID: "query_requestTimes_p75_ms", Name: "p75", Div: 1000000},
- {ID: "query_requestTimes_p95_ms", Name: "p95", Div: 1000000},
- {ID: "query_requestTimes_p99_ms", Name: "p99", Div: 1000000},
- {ID: "query_requestTimes_p999_ms", Name: "p999", Div: 1000000},
- },
- },
- {
- ID: "update_requests",
- Title: "Update Requests",
- Units: "requests/s",
- Ctx: "solr.update_requests",
- Dims: Dims{
- {ID: "update_requests_count", Name: "update", Algo: module.Incremental},
- },
- },
- {
- ID: "update_errors",
- Title: "Update Errors",
- Units: "errors/s",
- Ctx: "solr.update_errors",
- Dims: Dims{
- {ID: "update_errors_count", Name: "errors", Algo: module.Incremental},
- },
- },
- {
- ID: "update_errors_by_type",
- Title: "Update Errors By Type",
- Units: "errors/s",
- Ctx: "solr.update_errors_by_type",
- Dims: Dims{
- {ID: "update_clientErrors_count", Name: "client", Algo: module.Incremental},
- {ID: "update_serverErrors_count", Name: "server", Algo: module.Incremental},
- {ID: "update_timeouts_count", Name: "timeouts", Algo: module.Incremental},
- },
- },
- {
- ID: "update_requests_processing_time",
- Title: "Update Requests Processing Time",
- Units: "milliseconds",
- Ctx: "solr.update_requests_processing_time",
- Dims: Dims{
- {ID: "update_totalTime_count", Name: "time", Algo: module.Incremental},
- },
- },
- {
- ID: "update_requests_timings",
- Title: "Update Requests Timings",
- Units: "milliseconds",
- Ctx: "solr.update_requests_timings",
- Dims: Dims{
- {ID: "update_requestTimes_min_ms", Name: "min", Div: 1000000},
- {ID: "update_requestTimes_median_ms", Name: "median", Div: 1000000},
- {ID: "update_requestTimes_mean_ms", Name: "mean", Div: 1000000},
- {ID: "update_requestTimes_max_ms", Name: "max", Div: 1000000},
- },
- },
- {
- ID: "update_requests_processing_time_percentile",
- Title: "Update Requests Processing Time Percentile",
- Units: "milliseconds",
- Ctx: "solr.update_requests_processing_time_percentile",
- Dims: Dims{
- {ID: "update_requestTimes_p75_ms", Name: "p75", Div: 1000000},
- {ID: "update_requestTimes_p95_ms", Name: "p95", Div: 1000000},
- {ID: "update_requestTimes_p99_ms", Name: "p99", Div: 1000000},
- {ID: "update_requestTimes_p999_ms", Name: "p999", Div: 1000000},
- },
- },
-}
diff --git a/modules/solr/config_schema.json b/modules/solr/config_schema.json
deleted file mode 100644
index 66dde58bf..000000000
--- a/modules/solr/config_schema.json
+++ /dev/null
@@ -1,59 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/solr job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
- }
- },
- "required": [
- "name",
- "url"
- ]
-}
diff --git a/modules/solr/integrations/solr.md b/modules/solr/integrations/solr.md
deleted file mode 100644
index ad0b6acbb..000000000
--- a/modules/solr/integrations/solr.md
+++ /dev/null
@@ -1,223 +0,0 @@
-
-
-# Solr
-
-
-
-
-
-Plugin: go.d.plugin
-Module: solr
-
-
-
-## Overview
-
-This collector monitors Solr instances.
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-This integration doesn't support auto-detection.
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Solr instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| solr.search_requests | search | requests/s |
-| solr.search_errors | errors | errors/s |
-| solr.search_errors_by_type | client, server, timeouts | errors/s |
-| solr.search_requests_processing_time | time | milliseconds |
-| solr.search_requests_timings | min, median, mean, max | milliseconds |
-| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |
-| solr.update_requests | search | requests/s |
-| solr.update_errors | errors | errors/s |
-| solr.update_errors_by_type | client, server, timeouts | errors/s |
-| solr.update_requests_processing_time | time | milliseconds |
-| solr.update_requests_timings | min, median, mean, max | milliseconds |
-| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Solr version 6.4+
-
-This collector does not work with Solr versions lower 6.4.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `go.d/solr.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config go.d/solr.conf
-```
-#### Options
-
-The following options can be defined globally: update_every, autodetection_retry.
-
-
-All options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Data collection frequency. | 1 | no |
-| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
-| url | Server URL. | http://127.0.0.1:8983 | yes |
-| socket | Server Unix socket. | | no |
-| address | Server address in IP:PORT format. | | no |
-| fcgi_path | Status path. | /status | no |
-| timeout | HTTP request timeout. | 1 | no |
-| username | Username for basic HTTP authentication. | | no |
-| password | Password for basic HTTP authentication. | | no |
-| proxy_url | Proxy URL. | | no |
-| proxy_username | Username for proxy basic HTTP authentication. | | no |
-| proxy_password | Password for proxy basic HTTP authentication. | | no |
-| method | HTTP request method. | GET | no |
-| body | HTTP request body. | | no |
-| headers | HTTP request headers. | | no |
-| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
-| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
-| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
-| tls_cert | Client TLS certificate. | | no |
-| tls_key | Client TLS key. | | no |
-
-
-
-#### Examples
-
-##### Basic
-
-An example configuration.
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://localhost:8983
-
-```
-
-
-##### Basic HTTP auth
-
-Local Solr instance with basic HTTP authentication.
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://localhost:8983
- username: foo
- password: bar
-
-```
-
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Local and remote instances.
-
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://localhost:8983
-
- - name: remote
- url: http://203.0.113.10:8983
-
-```
-
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `go.d.plugin` to debug the collector:
-
- ```bash
- ./go.d.plugin -d -m solr
- ```
-
-
diff --git a/modules/solr/metadata.yaml b/modules/solr/metadata.yaml
deleted file mode 100644
index 066744f63..000000000
--- a/modules/solr/metadata.yaml
+++ /dev/null
@@ -1,268 +0,0 @@
-plugin_name: go.d.plugin
-modules:
- - meta:
- id: collector-go.d.plugin-solr
- plugin_name: go.d.plugin
- module_name: solr
- monitored_instance:
- name: Solr
- link: https://lucene.apache.org/solr/
- icon_filename: solr.svg
- categories:
- - data-collection.search-engines
- keywords:
- - solr
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- most_popular: false
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Solr instances.
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Solr version 6.4+
- description: |
- This collector does not work with Solr versions lower 6.4.
- configuration:
- file:
- name: go.d/solr.conf
- options:
- description: |
- The following options can be defined globally: update_every, autodetection_retry.
- folding:
- title: All options
- enabled: true
- list:
- - name: update_every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: autodetection_retry
- description: Recheck interval in seconds. Zero means no recheck will be scheduled.
- default_value: 0
- required: false
- - name: url
- description: Server URL.
- default_value: http://127.0.0.1:8983
- required: true
- - name: socket
- description: Server Unix socket.
- default_value: ""
- required: false
- - name: address
- description: Server address in IP:PORT format.
- default_value: ""
- required: false
- - name: fcgi_path
- description: Status path.
- default_value: /status
- required: false
- - name: timeout
- description: HTTP request timeout.
- default_value: 1
- required: false
- - name: username
- description: Username for basic HTTP authentication.
- default_value: ""
- required: false
- - name: password
- description: Password for basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_url
- description: Proxy URL.
- default_value: ""
- required: false
- - name: proxy_username
- description: Username for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_password
- description: Password for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: method
- description: HTTP request method.
- default_value: GET
- required: false
- - name: body
- description: HTTP request body.
- default_value: ""
- required: false
- - name: headers
- description: HTTP request headers.
- default_value: ""
- required: false
- - name: not_follow_redirects
- description: Redirect handling policy. Controls whether the client follows redirects.
- default_value: false
- required: false
- - name: tls_skip_verify
- description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
- default_value: false
- required: false
- - name: tls_ca
- description: Certification authority that the client uses when verifying the server's certificates.
- default_value: ""
- required: false
- - name: tls_cert
- description: Client TLS certificate.
- default_value: ""
- required: false
- - name: tls_key
- description: Client TLS key.
- default_value: ""
- required: false
- examples:
- folding:
- title: Config
- enabled: true
- list:
- - name: Basic
- description: An example configuration.
- config: |
- jobs:
- - name: local
- url: http://localhost:8983
- - name: Basic HTTP auth
- description: Local Solr instance with basic HTTP authentication.
- config: |
- jobs:
- - name: local
- url: http://localhost:8983
- username: foo
- password: bar
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Local and remote instances.
- config: |
- jobs:
- - name: local
- url: http://localhost:8983
-
- - name: remote
- url: http://203.0.113.10:8983
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: solr.search_requests
- description: Search Requests
- unit: requests/s
- chart_type: line
- dimensions:
- - name: search
- - name: solr.search_errors
- description: Search Errors
- unit: errors/s
- chart_type: line
- dimensions:
- - name: errors
- - name: solr.search_errors_by_type
- description: Search Errors By Type
- unit: errors/s
- chart_type: line
- dimensions:
- - name: client
- - name: server
- - name: timeouts
- - name: solr.search_requests_processing_time
- description: Search Requests Processing Time
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: time
- - name: solr.search_requests_timings
- description: Search Requests Timings
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: min
- - name: median
- - name: mean
- - name: max
- - name: solr.search_requests_processing_time_percentile
- description: Search Requests Processing Time Percentile
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: p75
- - name: p95
- - name: p99
- - name: p999
- - name: solr.update_requests
- description: Update Requests
- unit: requests/s
- chart_type: line
- dimensions:
- - name: search
- - name: solr.update_errors
- description: Update Errors
- unit: errors/s
- chart_type: line
- dimensions:
- - name: errors
- - name: solr.update_errors_by_type
- description: Update Errors By Type
- unit: errors/s
- chart_type: line
- dimensions:
- - name: client
- - name: server
- - name: timeouts
- - name: solr.update_requests_processing_time
- description: Update Requests Processing Time
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: time
- - name: solr.update_requests_timings
- description: Update Requests Timings
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: min
- - name: median
- - name: mean
- - name: max
- - name: solr.update_requests_processing_time_percentile
- description: Update Requests Processing Time Percentile
- unit: milliseconds
- chart_type: line
- dimensions:
- - name: p75
- - name: p95
- - name: p99
- - name: p999
diff --git a/modules/solr/parser.go b/modules/solr/parser.go
deleted file mode 100644
index c8a9eaa54..000000000
--- a/modules/solr/parser.go
+++ /dev/null
@@ -1,151 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package solr
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "strings"
-)
-
-type count struct {
- Count int64
-}
-
-type common struct {
- Count int64
- MeanRate float64 `json:"meanRate"`
- MinRate1min float64 `json:"1minRate"`
- MinRate5min float64 `json:"5minRate"`
- MinRate15min float64 `json:"15minRate"`
-}
-
-type requestTimes struct {
- Count int64
- MeanRate float64 `json:"meanRate"`
- MinRate1min float64 `json:"1minRate"`
- MinRate5min float64 `json:"5minRate"`
- MinRate15min float64 `json:"15minRate"`
- MinMS float64 `json:"min_ms"`
- MaxMS float64 `json:"max_ms"`
- MeanMS float64 `json:"mean_ms"`
- MedianMS float64 `json:"median_ms"`
- StdDevMS float64 `json:"stddev_ms"`
- P75MS float64 `json:"p75_ms"`
- P95MS float64 `json:"p95_ms"`
- P99MS float64 `json:"p99_ms"`
- P999MS float64 `json:"p999_ms"`
-}
-
-type coresMetrics struct {
- Metrics map[string]map[string]json.RawMessage
-}
-
-func (s *Solr) parse(resp *http.Response) (map[string]int64, error) {
- var cm coresMetrics
- var metrics = make(map[string]int64)
-
- if err := json.NewDecoder(resp.Body).Decode(&cm); err != nil {
- return nil, err
- }
-
- if len(cm.Metrics) == 0 {
- return nil, errors.New("unparsable data")
- }
-
- for core, data := range cm.Metrics {
- coreName := core[10:]
-
- if !s.cores[coreName] {
- s.addCoreCharts(coreName)
- s.cores[coreName] = true
- }
-
- if err := s.parseCore(coreName, data, metrics); err != nil {
- return nil, err
- }
- }
-
- return metrics, nil
-}
-
-func (s *Solr) parseCore(core string, data map[string]json.RawMessage, metrics map[string]int64) error {
- var (
- simpleCount int64
- count count
- common common
- requestTimes requestTimes
- )
-
- for metric, stats := range data {
- parts := strings.Split(metric, ".")
-
- if len(parts) != 3 {
- continue
- }
-
- typ, handler, stat := strings.ToLower(parts[0]), parts[1], parts[2]
-
- if handler == "updateHandler" {
- // TODO:
- continue
- }
-
- switch stat {
- case "clientErrors", "errors", "serverErrors", "timeouts":
- if err := json.Unmarshal(stats, &common); err != nil {
- return err
- }
- metrics[format("%s_%s_%s_count", core, typ, stat)] += common.Count
- case "requests", "totalTime":
- var c int64
- if s.version < 7.0 {
- if err := json.Unmarshal(stats, &count); err != nil {
- return err
- }
- c = count.Count
- } else {
- if err := json.Unmarshal(stats, &simpleCount); err != nil {
- return err
- }
- c = simpleCount
- }
- metrics[format("%s_%s_%s_count", core, typ, stat)] += c
- case "requestTimes":
- if err := json.Unmarshal(stats, &requestTimes); err != nil {
- return err
- }
- metrics[format("%s_%s_%s_count", core, typ, stat)] += requestTimes.Count
- metrics[format("%s_%s_%s_min_ms", core, typ, stat)] += int64(requestTimes.MinMS * 1e6)
- metrics[format("%s_%s_%s_mean_ms", core, typ, stat)] += int64(requestTimes.MeanMS * 1e6)
- metrics[format("%s_%s_%s_median_ms", core, typ, stat)] += int64(requestTimes.MedianMS * 1e6)
- metrics[format("%s_%s_%s_max_ms", core, typ, stat)] += int64(requestTimes.MaxMS * 1e6)
- metrics[format("%s_%s_%s_p75_ms", core, typ, stat)] += int64(requestTimes.P75MS * 1e6)
- metrics[format("%s_%s_%s_p95_ms", core, typ, stat)] += int64(requestTimes.P95MS * 1e6)
- metrics[format("%s_%s_%s_p99_ms", core, typ, stat)] += int64(requestTimes.P99MS * 1e6)
- metrics[format("%s_%s_%s_p999_ms", core, typ, stat)] += int64(requestTimes.P999MS * 1e6)
- }
- }
-
- return nil
-}
-
-func (s *Solr) addCoreCharts(core string) {
- charts := charts.Copy()
-
- for _, chart := range *charts {
- chart.ID = format("%s_%s", core, chart.ID)
- chart.Fam = format("core %s", core)
-
- for _, dim := range chart.Dims {
- dim.ID = format("%s_%s", core, dim.ID)
- }
- }
-
- _ = s.charts.Add(*charts...)
-
-}
-
-var format = fmt.Sprintf
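
For reference, the parser removed above keys off Solr's flat metric names: each core key carries a `solr.core.` prefix (10 characters, hence `core[10:]`), and each stat key under a core has the shape `<TYPE>.<handler>.<stat>`. A standalone sketch of that key handling; the sample keys are taken from the testdata below:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// "solr.core." is 10 characters, which is why parse slices core[10:].
	core := "solr.core.core1"
	fmt.Println(core[10:]) // core1

	// Stat keys split into exactly three dot-separated parts; parseCore
	// ignores anything else and skips the "updateHandler" handler explicitly.
	for _, key := range []string{
		"QUERY./select.requestTimes",
		"UPDATE./update/json.requests", // "/update/json" has no dots, so still 3 parts
		"UPDATE.updateHandler.adds",
	} {
		parts := strings.Split(key, ".")
		if len(parts) != 3 {
			continue
		}
		typ, handler, stat := strings.ToLower(parts[0]), parts[1], parts[2]
		fmt.Printf("type=%s handler=%s stat=%s\n", typ, handler, stat)
	}
}
```
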
diff --git a/modules/solr/solr.go b/modules/solr/solr.go
deleted file mode 100644
index 57f2d7083..000000000
--- a/modules/solr/solr.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package solr
-
-import (
- _ "embed"
- "encoding/json"
- "fmt"
- "io"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-//go:embed "config_schema.json"
-var configSchema string
-
-func init() {
- module.Register("solr", module.Creator{
- JobConfigSchema: configSchema,
- Create: func() module.Module { return New() },
- })
-}
-
-const (
- defaultURL = "http://127.0.0.1:8983"
- defaultHTTPTimeout = time.Second
-)
-
-const (
- minSupportedVersion = 6.4
- coresHandlersURLPath = "/solr/admin/metrics"
- coresHandlersURLQuery = "group=core&prefix=UPDATE,QUERY&wt=json"
- infoSystemURLPath = "/solr/admin/info/system"
- infoSystemURLQuery = "wt=json"
-)
-
-type infoSystem struct {
- Lucene struct {
- Version string `json:"solr-spec-version"`
- }
-}
-
-// New creates Solr with default values
-func New() *Solr {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
- },
- },
- }
- return &Solr{
- Config: config,
- cores: make(map[string]bool),
- }
-}
-
-// Config is the Solr module configuration.
-type Config struct {
- web.HTTP `yaml:",inline"`
-}
-
-// Solr solr module
-type Solr struct {
- module.Base
- Config `yaml:",inline"`
-
- cores map[string]bool
- client *http.Client
- version float64
- charts *Charts
-}
-
-func (s *Solr) doRequest(req *http.Request) (*http.Response, error) {
- return s.client.Do(req)
-}
-
-// Cleanup makes cleanup
-func (Solr) Cleanup() {}
-
-// Init makes initialization
-func (s *Solr) Init() bool {
- if s.URL == "" {
- s.Error("URL not set")
- return false
- }
-
- client, err := web.NewHTTPClient(s.Client)
- if err != nil {
- s.Error(err)
- return false
- }
-
- s.client = client
- return true
-}
-
-// Check makes check
-func (s *Solr) Check() bool {
- if err := s.getVersion(); err != nil {
- s.Error(err)
- return false
- }
-
- if s.version < minSupportedVersion {
- s.Errorf("unsupported Solr version : %.1f", s.version)
- return false
- }
-
- return true
-}
-
-// Charts creates Charts
-func (s *Solr) Charts() *Charts {
- s.charts = &Charts{}
-
- return s.charts
-}
-
-// Collect collects metrics
-func (s *Solr) Collect() map[string]int64 {
- req, err := createRequest(s.Request, coresHandlersURLPath, coresHandlersURLQuery)
- if err != nil {
- s.Errorf("error on creating http request : %v", err)
- return nil
- }
-
- resp, err := s.doRequest(req)
- if err != nil {
- s.Errorf("error on request to %s : %s", req.URL, err)
- return nil
- }
- defer closeBody(resp)
-
- if resp.StatusCode != http.StatusOK {
- s.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
- return nil
- }
-
- metrics, err := s.parse(resp)
- if err != nil {
- s.Errorf("error on parse response from %s : %s", req.URL, err)
- return nil
- }
-
- return metrics
-}
-
-func (s *Solr) getVersion() error {
- req, err := createRequest(s.Request, infoSystemURLPath, infoSystemURLQuery)
- if err != nil {
- return fmt.Errorf("error on creating http request : %v", err)
- }
-
- resp, err := s.doRequest(req)
- if err != nil {
- return fmt.Errorf("error on request to %s : %s", req.URL, err)
- }
- defer closeBody(resp)
-
- if resp.StatusCode != http.StatusOK {
- return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode)
- }
-
- var info infoSystem
-
- if err := json.NewDecoder(resp.Body).Decode(&info); err != nil {
- return fmt.Errorf("error on decode response from %s : %s", req.URL, err)
- }
-
- var idx int
-
- if idx = strings.LastIndex(info.Lucene.Version, "."); idx == -1 {
- return fmt.Errorf("error on parsing version '%s': bad format", info.Lucene.Version)
- }
-
- if s.version, err = strconv.ParseFloat(info.Lucene.Version[:idx], 64); err != nil {
- return fmt.Errorf("error on parsing version '%s' : %s", info.Lucene.Version, err)
- }
-
- return nil
-}
-
-func createRequest(req web.Request, urlPath, urlQuery string) (*http.Request, error) {
- r := req.Copy()
- u, err := url.Parse(r.URL)
- if err != nil {
- return nil, err
- }
-
- u.Path = urlPath
- u.RawQuery = urlQuery
- r.URL = u.String()
- return web.NewHTTPRequest(r)
-}
-
-func closeBody(resp *http.Response) {
- if resp != nil && resp.Body != nil {
- _, _ = io.Copy(io.Discard, resp.Body)
- _ = resp.Body.Close()
- }
-}
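
The version gate in `Check` relies on `getVersion` reducing a `solr-spec-version` string such as `7.3.1` to its `major.minor` float for comparison against `minSupportedVersion`. A minimal sketch of that reduction (the helper name here is illustrative):

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// majorMinor mirrors the removed getVersion: drop everything after the last
// dot of the spec-version string and parse the remainder as a float.
func majorMinor(specVersion string) (float64, error) {
	idx := strings.LastIndex(specVersion, ".")
	if idx == -1 {
		return 0, fmt.Errorf("bad version format: %q", specVersion)
	}
	return strconv.ParseFloat(specVersion[:idx], 64)
}

func main() {
	for _, v := range []string{"6.4.0", "7.3.1"} {
		f, err := majorMinor(v)
		fmt.Println(v, "->", f, err) // 6.4 and 7.3
	}
}
```
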
diff --git a/modules/solr/solr_test.go b/modules/solr/solr_test.go
deleted file mode 100644
index f545adeb0..000000000
--- a/modules/solr/solr_test.go
+++ /dev/null
@@ -1,274 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package solr
-
-import (
- "fmt"
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
-
- "github.com/netdata/go.d.plugin/agent/module"
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-var (
- coreMetricsV6, _ = os.ReadFile("testdata/core-metrics-v6.txt")
- coreMetricsV7, _ = os.ReadFile("testdata/core-metrics-v7.txt")
-)
-
-func version(v string) string {
- return format(`{ "lucene":{ "solr-spec-version":"%s"}}`, v)
-}
-
-func TestNew(t *testing.T) {
- job := New()
-
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration)
-}
-
-func TestSolr_Init(t *testing.T) {
- job := New()
-
- assert.True(t, job.Init())
- assert.NotNil(t, job.client)
-}
-
-func TestSolr_Check(t *testing.T) {
- job := New()
-
- ts := httptest.NewServer(
- http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/solr/admin/info/system" {
- _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion))))
- return
- }
- }))
-
- job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
-}
-
-func TestSolr_Check_UnsupportedVersion(t *testing.T) {
- job := New()
-
- ts := httptest.NewServer(
- http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/solr/admin/info/system" {
- _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion-1))))
- return
- }
- }))
-
- job.URL = ts.URL
-
- require.True(t, job.Init())
-
- assert.False(t, job.Check())
-}
-
-func TestSolr_Charts(t *testing.T) {
- assert.NotNil(t, New().Charts())
-}
-
-func TestSolr_Cleanup(t *testing.T) {
- New().Cleanup()
-}
-
-func TestSolr_CollectV6(t *testing.T) {
- job := New()
-
- ts := httptest.NewServer(
- http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/solr/admin/info/system" {
- _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion))))
- return
- }
- if r.URL.Path == "/solr/admin/metrics" {
- _, _ = w.Write(coreMetricsV6)
- return
- }
- }))
-
- job.URL = ts.URL
-
- require.True(t, job.Init())
- require.True(t, job.Check())
- require.NotNil(t, job.Charts())
-
- expected := map[string]int64{
- "core2_query_requestTimes_min_ms": 0,
- "core1_query_serverErrors_count": 3,
- "core2_update_requestTimes_mean_ms": 0,
- "core2_query_requestTimes_p99_ms": 297000000,
- "core2_query_requestTimes_p999_ms": 2997000000,
- "core1_update_requestTimes_p99_ms": 297000000,
- "core2_update_requestTimes_p75_ms": 225000000,
- "core2_update_requests_count": 3,
- "core2_query_requestTimes_p75_ms": 225000000,
- "core2_update_requestTimes_min_ms": 0,
- "core2_query_clientErrors_count": 3,
- "core2_query_requestTimes_count": 3,
- "core2_query_requestTimes_median_ms": 0,
- "core2_query_requestTimes_p95_ms": 285000000,
- "core2_update_serverErrors_count": 3,
- "core1_query_requestTimes_mean_ms": 0,
- "core1_update_totalTime_count": 3,
- "core1_update_errors_count": 3,
- "core1_query_errors_count": 3,
- "core1_query_timeouts_count": 3,
- "core1_update_requestTimes_p95_ms": 285000000,
- "core1_query_clientErrors_count": 3,
- "core2_query_serverErrors_count": 3,
- "core1_update_requestTimes_p75_ms": 225000000,
- "core2_update_requestTimes_p99_ms": 297000000,
- "core2_query_requests_count": 3,
- "core2_update_clientErrors_count": 3,
- "core1_update_requestTimes_min_ms": 0,
- "core1_update_requestTimes_mean_ms": 0,
- "core1_query_requestTimes_p95_ms": 285000000,
- "core1_query_requestTimes_p999_ms": 2997000000,
- "core1_update_serverErrors_count": 3,
- "core1_query_requests_count": 3,
- "core1_update_requestTimes_p999_ms": 2997000000,
- "core1_query_requestTimes_p75_ms": 225000000,
- "core1_update_requestTimes_count": 3,
- "core2_update_requestTimes_p95_ms": 285000000,
- "core1_query_requestTimes_count": 3,
- "core1_query_requestTimes_p99_ms": 297000000,
- "core1_update_requestTimes_median_ms": 0,
- "core1_update_requestTimes_max_ms": 0,
- "core2_update_requestTimes_count": 3,
- "core1_query_requestTimes_min_ms": 0,
- "core1_update_timeouts_count": 3,
- "core2_update_timeouts_count": 3,
- "core2_update_errors_count": 3,
- "core1_update_requests_count": 3,
- "core2_query_errors_count": 3,
- "core1_query_requestTimes_median_ms": 0,
- "core1_query_requestTimes_max_ms": 0,
- "core1_update_clientErrors_count": 3,
- "core2_update_requestTimes_median_ms": 0,
- "core2_query_requestTimes_mean_ms": 0,
- "core2_update_totalTime_count": 3,
- "core2_update_requestTimes_max_ms": 0,
- "core2_update_requestTimes_p999_ms": 2997000000,
- "core2_query_timeouts_count": 3,
- "core2_query_requestTimes_max_ms": 0,
- "core1_query_totalTime_count": 3,
- "core2_query_totalTime_count": 3,
- }
-
- assert.Equal(t, expected, job.Collect())
- assert.Equal(t, expected, job.Collect())
-}
-
-func TestSolr_CollectV7(t *testing.T) {
- job := New()
-
- ts := httptest.NewServer(
- http.HandlerFunc(
- func(w http.ResponseWriter, r *http.Request) {
- if r.URL.Path == "/solr/admin/info/system" {
- _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion+1))))
- return
- }
- if r.URL.Path == "/solr/admin/metrics" {
- _, _ = w.Write(coreMetricsV7)
- return
- }
- }))
-
- job.URL = ts.URL
-
- require.True(t, job.Init())
- require.True(t, job.Check())
- require.NotNil(t, job.Charts())
-
- expected := map[string]int64{
- "core1_query_requestTimes_p95_ms": 285000000,
- "core1_query_timeouts_count": 3,
- "core1_update_requestTimes_p999_ms": 2997000000,
- "core2_query_requestTimes_mean_ms": 0,
- "core2_query_timeouts_count": 3,
- "core1_update_timeouts_count": 3,
- "core1_update_requestTimes_mean_ms": 0,
- "core2_update_serverErrors_count": 3,
- "core2_query_requestTimes_min_ms": 0,
- "core2_query_requestTimes_p75_ms": 225000000,
- "core2_update_clientErrors_count": 3,
- "core2_update_requestTimes_count": 3,
- "core2_query_requestTimes_max_ms": 0,
- "core1_query_requestTimes_mean_ms": 0,
- "core1_update_totalTime_count": 3,
- "core1_query_serverErrors_count": 3,
- "core1_update_requestTimes_p99_ms": 297000000,
- "core2_query_totalTime_count": 3,
- "core2_update_requestTimes_max_ms": 0,
- "core2_query_requestTimes_p99_ms": 297000000,
- "core1_query_requestTimes_count": 3,
- "core1_query_requestTimes_median_ms": 0,
- "core1_query_clientErrors_count": 3,
- "core2_update_requestTimes_mean_ms": 0,
- "core2_update_requestTimes_median_ms": 0,
- "core2_update_requestTimes_p95_ms": 285000000,
- "core2_update_requestTimes_p999_ms": 2997000000,
- "core2_update_totalTime_count": 3,
- "core1_update_clientErrors_count": 3,
- "core2_query_serverErrors_count": 3,
- "core2_query_requests_count": 3,
- "core1_update_serverErrors_count": 3,
- "core1_update_requestTimes_p75_ms": 225000000,
- "core2_update_requestTimes_min_ms": 0,
- "core2_query_errors_count": 3,
- "core1_update_errors_count": 3,
- "core1_query_totalTime_count": 3,
- "core1_update_requestTimes_p95_ms": 285000000,
- "core2_query_requestTimes_p95_ms": 285000000,
- "core2_query_requestTimes_p999_ms": 2997000000,
- "core1_query_requestTimes_min_ms": 0,
- "core2_update_errors_count": 3,
- "core2_query_clientErrors_count": 3,
- "core1_update_requestTimes_min_ms": 0,
- "core1_query_requestTimes_max_ms": 0,
- "core1_query_requestTimes_p75_ms": 225000000,
- "core1_query_requestTimes_p999_ms": 2997000000,
- "core2_update_requestTimes_p75_ms": 225000000,
- "core2_update_timeouts_count": 3,
- "core1_query_requestTimes_p99_ms": 297000000,
- "core1_update_requests_count": 3,
- "core1_update_requestTimes_median_ms": 0,
- "core1_update_requestTimes_max_ms": 0,
- "core2_update_requestTimes_p99_ms": 297000000,
- "core2_query_requestTimes_count": 3,
- "core1_query_errors_count": 3,
- "core1_query_requests_count": 3,
- "core1_update_requestTimes_count": 3,
- "core2_update_requests_count": 3,
- "core2_query_requestTimes_median_ms": 0,
- }
-
- assert.Equal(t, expected, job.Collect())
- assert.Equal(t, expected, job.Collect())
-}
-
-func TestSolr_Collect_404(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(404)
- }))
- defer ts.Close()
-
- job := New()
- job.URL = ts.URL
-
- require.True(t, job.Init())
- assert.False(t, job.Check())
-}
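
The removed tests all follow one pattern: a single `httptest` server answers both endpoints the module queries, so `Init`, `Check`, and `Collect` run end to end against canned payloads. A self-contained sketch of that pattern, with payloads shortened (the real tests serve the testdata files below):

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// One test server routes both Solr endpoints the collector hits.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		switch r.URL.Path {
		case "/solr/admin/info/system":
			_, _ = w.Write([]byte(`{"lucene":{"solr-spec-version":"6.4.0"}}`))
		case "/solr/admin/metrics":
			_, _ = w.Write([]byte(`{"metrics":{}}`)) // stand-in for the testdata payloads
		default:
			w.WriteHeader(http.StatusNotFound)
		}
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/solr/admin/info/system")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```
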
diff --git a/modules/solr/testdata/core-metrics-v6.txt b/modules/solr/testdata/core-metrics-v6.txt
deleted file mode 100644
index 30d756b58..000000000
--- a/modules/solr/testdata/core-metrics-v6.txt
+++ /dev/null
@@ -1,794 +0,0 @@
-{
- "responseHeader":{
- "status":0,
- "QTime":5
- },
- "metrics":{
- "solr.core.core1":{
- "QUERY./select.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./select.requests":{
- "count":1
- },
- "QUERY./select.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.totalTime":{
- "count":1
- },
- "QUERY./sql.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./sql.requests":{
- "count":1
- },
- "QUERY./sql.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.totalTime":{
- "count":1
- },
- "QUERY./stream.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./stream.requests":{
- "count":1
- },
- "QUERY./stream.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.totalTime":{
- "count":1
- },
- "UPDATE./update.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update.requests":{
- "count":1
- },
- "UPDATE./update.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.totalTime":{
- "count":1
- },
- "UPDATE./update/csv.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/csv.requests":{
- "count":1
- },
- "UPDATE./update/csv.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.totalTime":{
- "count":1
- },
- "UPDATE./update/json.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/json.requests":{
- "count":1
- },
- "UPDATE./update/json.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.totalTime":{
- "count":1
- },
- "UPDATE.updateHandler.adds":{
- "value":0
- },
- "UPDATE.updateHandler.autoCommits":{
- "value":0
- },
- "UPDATE.updateHandler.commits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeAdds":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesById":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesByQuery":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.deletesById":{
- "value":0
- },
- "UPDATE.updateHandler.deletesByQuery":{
- "value":0
- },
- "UPDATE.updateHandler.docsPending":{
- "value":0
- },
- "UPDATE.updateHandler.errors":{
- "value":0
- },
- "UPDATE.updateHandler.expungeDeletes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.merges":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.optimizes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.rollbacks":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.softAutoCommits":{
- "value":0
- },
- "UPDATE.updateHandler.splits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- }
- },
- "solr.core.core2":{
- "QUERY./select.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./select.requests":{
- "count":1
- },
- "QUERY./select.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.totalTime":{
- "count":1
- },
- "QUERY./sql.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./sql.requests":{
- "count":1
- },
- "QUERY./sql.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.totalTime":{
- "count":1
- },
- "QUERY./stream.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./stream.requests":{
- "count":1
- },
- "QUERY./stream.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.totalTime":{
- "count":1
- },
- "UPDATE./update.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update.requests":{
- "count":1
- },
- "UPDATE./update.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.totalTime":{
- "count":1
- },
- "UPDATE./update/csv.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/csv.requests":{
- "count":1
- },
- "UPDATE./update/csv.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.totalTime":{
- "count":1
- },
- "UPDATE./update/json.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/json.requests":{
- "count":1
- },
- "UPDATE./update/json.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.totalTime":{
- "count":1
- },
- "UPDATE.updateHandler.adds":{
- "value":0
- },
- "UPDATE.updateHandler.autoCommits":{
- "value":0
- },
- "UPDATE.updateHandler.commits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeAdds":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesById":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesByQuery":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.deletesById":{
- "value":0
- },
- "UPDATE.updateHandler.deletesByQuery":{
- "value":0
- },
- "UPDATE.updateHandler.docsPending":{
- "value":0
- },
- "UPDATE.updateHandler.errors":{
- "value":0
- },
- "UPDATE.updateHandler.expungeDeletes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.merges":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.optimizes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.rollbacks":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.softAutoCommits":{
- "value":0
- },
- "UPDATE.updateHandler.splits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- }
- }
- }
-}
\ No newline at end of file
diff --git a/modules/solr/testdata/core-metrics-v7.txt b/modules/solr/testdata/core-metrics-v7.txt
deleted file mode 100644
index 0567f0d9b..000000000
--- a/modules/solr/testdata/core-metrics-v7.txt
+++ /dev/null
@@ -1,732 +0,0 @@
-{
- "responseHeader":{
- "status":0,
- "QTime":5
- },
- "metrics":{
- "solr.core.core1":{
- "QUERY./select.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.handlerStart":1546020968904,
- "QUERY./select.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./select.requests":1,
- "QUERY./select.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.totalTime":1,
- "QUERY./sql.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.handlerStart":1546020968901,
- "QUERY./sql.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./sql.requests":1,
- "QUERY./sql.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.totalTime":1,
- "QUERY./stream.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.handlerStart":1546020968894,
- "QUERY./stream.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./stream.requests":1,
- "QUERY./stream.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.totalTime":1,
- "UPDATE./update.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.handlerStart":1546020968419,
- "UPDATE./update.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update.requests":1,
- "UPDATE./update.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.totalTime":1,
- "UPDATE./update/csv.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.handlerStart":1546020968462,
- "UPDATE./update/csv.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/csv.requests":1,
- "UPDATE./update/csv.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.totalTime":1,
- "UPDATE./update/json.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.handlerStart":1546020968445,
- "UPDATE./update/json.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/json.requests":1,
- "UPDATE./update/json.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.totalTime":1,
- "UPDATE.updateHandler.adds":0,
- "UPDATE.updateHandler.autoCommitMaxTime":"15000ms",
- "UPDATE.updateHandler.autoCommits":0,
- "UPDATE.updateHandler.commits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeAdds":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesById":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesByQuery":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.deletesById":0,
- "UPDATE.updateHandler.deletesByQuery":0,
- "UPDATE.updateHandler.docsPending":0,
- "UPDATE.updateHandler.errors":0,
- "UPDATE.updateHandler.expungeDeletes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.merges":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.optimizes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.rollbacks":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.softAutoCommits":0,
- "UPDATE.updateHandler.splits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- }
- },
- "solr.core.core2":{
- "QUERY./select.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.handlerStart":1546020968904,
- "QUERY./select.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./select.requests":1,
- "QUERY./select.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./select.totalTime":1,
- "QUERY./sql.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.handlerStart":1546020968901,
- "QUERY./sql.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./sql.requests":1,
- "QUERY./sql.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./sql.totalTime":1,
- "QUERY./stream.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.handlerStart":1546020968894,
- "QUERY./stream.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "QUERY./stream.requests":1,
- "QUERY./stream.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "QUERY./stream.totalTime":1,
- "UPDATE./update.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.handlerStart":1546020968419,
- "UPDATE./update.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update.requests":1,
- "UPDATE./update.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update.totalTime":1,
- "UPDATE./update/csv.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.handlerStart":1546020968462,
- "UPDATE./update/csv.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/csv.requests":1,
- "UPDATE./update/csv.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/csv.totalTime":1,
- "UPDATE./update/json.clientErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.errors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.handlerStart":1546020968445,
- "UPDATE./update/json.requestTimes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0,
- "min_ms":0,
- "max_ms":0,
- "mean_ms":0,
- "median_ms":0,
- "stddev_ms":0,
- "p75_ms":75,
- "p95_ms":95,
- "p99_ms":99,
- "p999_ms":999
- },
- "UPDATE./update/json.requests":1,
- "UPDATE./update/json.serverErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.timeouts":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE./update/json.totalTime":1,
- "UPDATE.updateHandler.adds":0,
- "UPDATE.updateHandler.autoCommitMaxTime":"15000ms",
- "UPDATE.updateHandler.autoCommits":0,
- "UPDATE.updateHandler.commits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeAdds":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesById":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeDeletesByQuery":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.cumulativeErrors":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.deletesById":0,
- "UPDATE.updateHandler.deletesByQuery":0,
- "UPDATE.updateHandler.docsPending":0,
- "UPDATE.updateHandler.errors":0,
- "UPDATE.updateHandler.expungeDeletes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.merges":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.optimizes":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.rollbacks":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- },
- "UPDATE.updateHandler.softAutoCommits":0,
- "UPDATE.updateHandler.splits":{
- "count":1,
- "meanRate":0,
- "1minRate":0,
- "5minRate":0,
- "15minRate":0
- }
- }
- }
-}
\ No newline at end of file
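
Together, the two fixtures above capture the format change that drives the `s.version < 7.0` branch in `parseCore`: Solr 6 wraps simple counters in an object (`"requests":{"count":1}`), while Solr 7 flattens them to bare numbers (`"requests":1`). A sketch of the dual decode:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// decodeCount shows the two shapes parseCore handles for the "requests" and
// "totalTime" stats: an object with a count field on Solr 6, a bare number
// on Solr 7+. json.RawMessage defers the choice until the version is known.
func decodeCount(raw json.RawMessage, v7plus bool) (int64, error) {
	if v7plus {
		var n int64
		err := json.Unmarshal(raw, &n)
		return n, err
	}
	var c struct{ Count int64 }
	err := json.Unmarshal(raw, &c)
	return c.Count, err
}

func main() {
	v6, _ := decodeCount(json.RawMessage(`{"count":1}`), false)
	v7, _ := decodeCount(json.RawMessage(`1`), true)
	fmt.Println(v6, v7) // 1 1
}
```
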
diff --git a/modules/springboot2/README.md b/modules/springboot2/README.md
deleted file mode 120000
index 67b32e517..000000000
--- a/modules/springboot2/README.md
+++ /dev/null
@@ -1 +0,0 @@
-integrations/java_spring-boot_2_applications.md
\ No newline at end of file
diff --git a/modules/springboot2/charts.go b/modules/springboot2/charts.go
deleted file mode 100644
index 9ca9c5806..000000000
--- a/modules/springboot2/charts.go
+++ /dev/null
@@ -1,77 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package springboot2
-
-import (
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-type (
- // Charts is an alias for module.Charts
- Charts = module.Charts
- // Dims is an alias for module.Dims
- Dims = module.Dims
-)
-
-var charts = Charts{
- {
- ID: "response_codes",
- Title: "Response Codes", Units: "requests/s", Fam: "response_code", Type: module.Stacked, Ctx: "springboot2.response_codes",
- Dims: Dims{
- {ID: "resp_2xx", Name: "2xx", Algo: module.Incremental},
- {ID: "resp_5xx", Name: "5xx", Algo: module.Incremental},
- {ID: "resp_3xx", Name: "3xx", Algo: module.Incremental},
- {ID: "resp_4xx", Name: "4xx", Algo: module.Incremental},
- {ID: "resp_1xx", Name: "1xx", Algo: module.Incremental},
- },
- },
- {
- ID: "thread",
- Title: "Threads", Units: "threads", Fam: "threads", Type: module.Area, Ctx: "springboot2.thread",
- Dims: Dims{
- {ID: "threads_daemon", Name: "daemon"},
- {ID: "threads", Name: "total"},
- },
- },
- {
- ID: "heap",
- Title: "Overview", Units: "B", Fam: "heap", Type: module.Stacked, Ctx: "springboot2.heap",
- Dims: Dims{
- {ID: "mem_free", Name: "free"},
- {ID: "heap_used_eden", Name: "eden"},
- {ID: "heap_used_survivor", Name: "survivor"},
- {ID: "heap_used_old", Name: "old"},
- },
- },
- {
- ID: "heap_eden",
- Title: "Eden Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_eden",
- Dims: Dims{
- {ID: "heap_used_eden", Name: "used"},
- {ID: "heap_committed_eden", Name: "committed"},
- },
- },
- {
- ID: "heap_survivor",
- Title: "Survivor Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_survivor",
- Dims: Dims{
- {ID: "heap_used_survivor", Name: "used"},
- {ID: "heap_committed_survivor", Name: "committed"},
- },
- },
- {
- ID: "heap_old",
- Title: "Old Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_old",
- Dims: Dims{
- {ID: "heap_used_old", Name: "used"},
- {ID: "heap_committed_old", Name: "committed"},
- },
- },
- {
- ID: "uptime",
- Title: "The uptime of the Java virtual machine", Units: "seconds", Fam: "uptime", Type: module.Line, Ctx: "springboot2.uptime",
- Dims: Dims{
- {ID: "uptime", Name: "uptime", Div: 1000},
- },
- },
-}
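
Dims tagged `Algo: module.Incremental` (the response-code chart above) report cumulative counters; the agent converts successive samples into per-second rates, which is why that chart's unit is `requests/s`. A rough sketch of the derivation — the real agent additionally handles counter resets and each dim's Mul/Div scaling:

```go
package main

import "fmt"

// rate mirrors the idea behind module.Incremental: the collector hands the
// agent raw cumulative counts, and the rate is derived between samples.
func rate(prev, cur int64, elapsedSec float64) float64 {
	return float64(cur-prev) / elapsedSec
}

func main() {
	fmt.Println(rate(100, 250, 1.0)) // 150 requests/s
}
```
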
diff --git a/modules/springboot2/config_schema.json b/modules/springboot2/config_schema.json
deleted file mode 100644
index 008a8bb2d..000000000
--- a/modules/springboot2/config_schema.json
+++ /dev/null
@@ -1,76 +0,0 @@
-{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/springboot2 job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "uri_filter": {
- "type": "object",
- "properties": {
- "includes": {
- "type": "array",
- "items": {
- "type": "string"
- }
- },
- "excludes": {
- "type": "array",
- "items": {
- "type": "string"
- }
- }
- }
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- },
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
- }
- },
- "required": [
- "name",
- "url"
- ]
-}
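
The `uri_filter` object in this schema maps to `matcher.SimpleExpr` in springboot2.go below but is not shown in the README examples. A hypothetical sketch of how it would be populated; the field names come from the schema's includes/excludes keys, and the `* <glob>` pattern syntax is an assumption about go.d's `pkg/matcher` expression format:

```go
package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/matcher"
)

func main() {
	// Hypothetical uri_filter: includes/excludes lists populate
	// matcher.SimpleExpr, which the module parses during Init.
	expr := matcher.SimpleExpr{
		Includes: []string{"* /api/*"},      // assumed glob-style matcher expression
		Excludes: []string{"* /actuator/*"}, // assumed: drop actuator's own endpoints
	}
	m, err := expr.Parse()
	if err != nil {
		panic(err)
	}
	fmt.Println(m.MatchString("/api/users")) // expected: true
}
```
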
diff --git a/modules/springboot2/integrations/java_spring-boot_2_applications.md b/modules/springboot2/integrations/java_spring-boot_2_applications.md
deleted file mode 100644
index 26465d66c..000000000
--- a/modules/springboot2/integrations/java_spring-boot_2_applications.md
+++ /dev/null
@@ -1,233 +0,0 @@
-
-
-# Java Spring-boot 2 applications
-
-
-
-
-
-Plugin: go.d.plugin
-Module: springboot2
-
-
-
-## Overview
-
-This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.
-
-
-
-
-This collector is supported on all platforms.
-
-This collector supports collecting metrics from multiple instances of this integration, including remote instances.
-
-
-### Default Behavior
-
-#### Auto-Detection
-
-By default, it detects applications running on localhost.
-
-
-#### Limits
-
-The default configuration for this integration does not impose any limits on data collection.
-
-#### Performance Impact
-
-The default configuration for this integration is not expected to impose a significant performance impact on the system.
-
-
-## Metrics
-
-Metrics grouped by *scope*.
-
-The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels.
-
-
-
-### Per Java Spring-boot 2 applications instance
-
-These metrics refer to the entire monitored application.
-
-This scope has no labels.
-
-Metrics:
-
-| Metric | Dimensions | Unit |
-|:------|:----------|:----|
-| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s |
-| springboot2.thread | daemon, total | threads |
-| springboot2.heap | free, eden, survivor, old | B |
-| springboot2.heap_eden | used, committed | B |
-| springboot2.heap_survivor | used, committed | B |
-| springboot2.heap_old | used, committed | B |
-| springboot2.uptime | uptime | seconds |
-
-
-
-## Alerts
-
-There are no alerts configured by default for this integration.
-
-
-## Setup
-
-### Prerequisites
-
-#### Configure Spring Boot Actuator
-
-The Spring Boot Actuator exposes metrics over HTTP. To use it:
-
-- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.
-- set `management.endpoints.web.exposure.include=*` in your `application.properties`.
-
-Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
-
-
-
-### Configuration
-
-#### File
-
-The configuration file name for this integration is `go.d/springboot2.conf`.
-
-
-You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
-
-```bash
-cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
-sudo ./edit-config go.d/springboot2.conf
-```
-#### Options
-
-The following options can be defined globally: update_every, autodetection_retry.
-
-
-Config options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Data collection frequency. | 1 | no |
-| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
-| url | Server URL. | | yes |
-| timeout | HTTP request timeout. | 1 | no |
-| username | Username for basic HTTP authentication. | | no |
-| password | Password for basic HTTP authentication. | | no |
-| proxy_url | Proxy URL. | | no |
-| proxy_username | Username for proxy basic HTTP authentication. | | no |
-| proxy_password | Password for proxy basic HTTP authentication. | | no |
-| method | HTTP request method. | GET | no |
-| body | HTTP request body. | | no |
-| headers | HTTP request headers. | | no |
-| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
-| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
-| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
-| tls_cert | Client TLS certificate. | | no |
-| tls_key | Client TLS key. | | no |
-
-
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
-
-```
-##### HTTP authentication
-
-Basic HTTP authentication.
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
- username: username
- password: password
-
-```
-
-
-##### HTTPS with self-signed certificate
-
-Do not validate server certificate chain and hostname.
-
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: https://127.0.0.1:8080/actuator/prometheus
- tls_skip_verify: yes
-
-```
-
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-Config
-
-```yaml
-jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
-
- - name: remote
- url: http://192.0.2.1:8080/actuator/prometheus
-
-```
-
-
-
-
-## Troubleshooting
-
-### Debug Mode
-
-To troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output
-should give you clues as to why the collector isn't working.
-
-- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
- your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
-
- ```bash
- cd /usr/libexec/netdata/plugins.d/
- ```
-
-- Switch to the `netdata` user.
-
- ```bash
- sudo -u netdata -s
- ```
-
-- Run the `go.d.plugin` to debug the collector:
-
- ```bash
- ./go.d.plugin -d -m springboot2
- ```
-
-
diff --git a/modules/springboot2/metadata.yaml b/modules/springboot2/metadata.yaml
deleted file mode 100644
index 462d29dae..000000000
--- a/modules/springboot2/metadata.yaml
+++ /dev/null
@@ -1,239 +0,0 @@
-plugin_name: go.d.plugin
-modules:
- - meta:
- id: collector-go.d.plugin-springboot2
- plugin_name: go.d.plugin
- module_name: springboot2
- monitored_instance:
- name: Java Spring-boot 2 applications
- link: ""
- icon_filename: springboot.png
- categories:
- - data-collection.apm
- keywords:
- - springboot
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- info_provided_to_referring_integrations:
- description: ""
- most_popular: true
- overview:
- data_collection:
- metrics_description: |
- This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library.
- method_description: ""
- supported_platforms:
- include: []
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: |
- By default, it detects applications running on localhost.
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Configure Spring Boot Actuator
- description: |
-            The Spring Boot Actuator exposes metrics over HTTP. To use it:
-
- - add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies.
- - set `management.endpoints.web.exposure.include=*` in your `application.properties`.
-
- Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information.
- configuration:
- file:
- name: go.d/springboot2.conf
- options:
- description: |
- The following options can be defined globally: update_every, autodetection_retry.
- folding:
- title: Config options
- enabled: true
- list:
- - name: update_every
- description: Data collection frequency.
- default_value: 1
- required: false
- - name: autodetection_retry
- description: Recheck interval in seconds. Zero means no recheck will be scheduled.
- default_value: 0
- required: false
- - name: url
- description: Server URL.
- default_value: ""
- required: true
- - name: timeout
- description: HTTP request timeout.
- default_value: 1
- required: false
- - name: username
- description: Username for basic HTTP authentication.
- default_value: ""
- required: false
- - name: password
- description: Password for basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_url
- description: Proxy URL.
- default_value: ""
- required: false
- - name: proxy_username
- description: Username for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: proxy_password
- description: Password for proxy basic HTTP authentication.
- default_value: ""
- required: false
- - name: method
- description: HTTP request method.
- default_value: GET
- required: false
- - name: body
- description: HTTP request body.
- default_value: ""
- required: false
- - name: headers
- description: HTTP request headers.
- default_value: ""
- required: false
- - name: not_follow_redirects
- description: Redirect handling policy. Controls whether the client follows redirects.
- default_value: no
- required: false
- - name: tls_skip_verify
- description: Server certificate chain and hostname validation policy. Controls whether the client performs this check.
- default_value: no
- required: false
- - name: tls_ca
- description: Certification authority that the client uses when verifying the server's certificates.
- default_value: ""
- required: false
- - name: tls_cert
- description: Client TLS certificate.
- default_value: ""
- required: false
- - name: tls_key
- description: Client TLS key.
- default_value: ""
- required: false
- examples:
- folding:
- title: Config
- enabled: true
- list:
- - name: Basic
- folding:
- enabled: false
- description: A basic example configuration.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
- - name: HTTP authentication
- description: Basic HTTP authentication.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
- username: username
- password: password
- - name: HTTPS with self-signed certificate
- description: |
- Do not validate server certificate chain and hostname.
- config: |
- jobs:
- - name: local
- url: https://127.0.0.1:8080/actuator/prometheus
- tls_skip_verify: yes
- - name: Multi-instance
- description: |
- > **Note**: When you define multiple jobs, their names must be unique.
-
- Collecting metrics from local and remote instances.
- config: |
- jobs:
- - name: local
- url: http://127.0.0.1:8080/actuator/prometheus
-
- - name: remote
- url: http://192.0.2.1:8080/actuator/prometheus
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: These metrics refer to the entire monitored application.
- labels: []
- metrics:
- - name: springboot2.response_codes
- description: Response Codes
- unit: requests/s
- chart_type: stacked
- dimensions:
- - name: 1xx
- - name: 2xx
- - name: 3xx
- - name: 4xx
- - name: 5xx
- - name: springboot2.thread
- description: Threads
- unit: threads
- chart_type: area
- dimensions:
- - name: daemon
- - name: total
- - name: springboot2.heap
- description: Overview
- unit: B
- chart_type: stacked
- dimensions:
- - name: free
- - name: eden
- - name: survivor
- - name: old
- - name: springboot2.heap_eden
- description: Eden Space
- unit: B
- chart_type: area
- dimensions:
- - name: used
-                - name: committed
- - name: springboot2.heap_survivor
- description: Survivor Space
- unit: B
- chart_type: area
- dimensions:
- - name: used
-                - name: committed
- - name: springboot2.heap_old
- description: Old Space
- unit: B
- chart_type: area
- dimensions:
- - name: used
-                - name: committed
- - name: springboot2.uptime
-            description: The uptime of the Java virtual machine
- unit: seconds
- chart_type: line
- dimensions:
- - name: uptime
diff --git a/modules/springboot2/springboot2.go b/modules/springboot2/springboot2.go
deleted file mode 100644
index cff9d9c07..000000000
--- a/modules/springboot2/springboot2.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package springboot2
-
-import (
- _ "embed"
- "strings"
- "time"
-
- "github.com/netdata/go.d.plugin/pkg/matcher"
-
- mtx "github.com/netdata/go.d.plugin/pkg/metrics"
- "github.com/netdata/go.d.plugin/pkg/prometheus"
- "github.com/netdata/go.d.plugin/pkg/stm"
- "github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
-)
-
-//go:embed "config_schema.json"
-var configSchema string
-
-func init() {
- module.Register("springboot2", module.Creator{
- JobConfigSchema: configSchema,
- Create: func() module.Module { return New() },
- })
-}
-
-const (
- defaultHTTPTimeout = time.Second
-)
-
-// New returns SpringBoot2 instance with default values
-func New() *SpringBoot2 {
- return &SpringBoot2{
- HTTP: web.HTTP{
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
- },
- },
- }
-}
-
-// SpringBoot2 Spring boot 2 module
-type SpringBoot2 struct {
- module.Base
-
- web.HTTP `yaml:",inline"`
- URIFilter matcher.SimpleExpr `yaml:"uri_filter"`
-
- uriFilter matcher.Matcher
-
- prom prometheus.Prometheus
-}
-
-type metrics struct {
- Uptime mtx.Gauge `stm:"uptime,1000"`
-
- ThreadsDaemon mtx.Gauge `stm:"threads_daemon"`
- Threads mtx.Gauge `stm:"threads"`
-
- Resp1xx mtx.Counter `stm:"resp_1xx"`
- Resp2xx mtx.Counter `stm:"resp_2xx"`
- Resp3xx mtx.Counter `stm:"resp_3xx"`
- Resp4xx mtx.Counter `stm:"resp_4xx"`
- Resp5xx mtx.Counter `stm:"resp_5xx"`
-
- HeapUsed heap `stm:"heap_used"`
- HeapCommitted heap `stm:"heap_committed"`
-
- MemFree mtx.Gauge `stm:"mem_free"`
-}
-
-type heap struct {
- Eden mtx.Gauge `stm:"eden"`
- Survivor mtx.Gauge `stm:"survivor"`
- Old mtx.Gauge `stm:"old"`
-}
-
-// Cleanup Cleanup
-func (SpringBoot2) Cleanup() {}
-
-// Init makes initialization
-func (s *SpringBoot2) Init() bool {
- client, err := web.NewHTTPClient(s.Client)
- if err != nil {
- s.Error(err)
- return false
- }
- s.uriFilter, err = s.URIFilter.Parse()
- if err != nil && err != matcher.ErrEmptyExpr {
- s.Error(err)
- return false
- }
- s.prom = prometheus.New(client, s.Request)
- return true
-}
-
-// Check makes check
-func (s *SpringBoot2) Check() bool {
- rawMetrics, err := s.prom.ScrapeSeries()
- if err != nil {
- s.Warning(err)
- return false
- }
- jvmMemory := rawMetrics.FindByName("jvm_memory_used_bytes")
-
- return len(jvmMemory) > 0
-}
-
-// Charts creates Charts
-func (SpringBoot2) Charts() *Charts {
- return charts.Copy()
-}
-
-// Collect collects metrics
-func (s *SpringBoot2) Collect() map[string]int64 {
- rawMetrics, err := s.prom.ScrapeSeries()
- if err != nil {
- return nil
- }
-
- var m metrics
-
- // uptime
- m.Uptime.Set(rawMetrics.FindByName("process_uptime_seconds").Max())
-
- // response
- s.gatherResponse(rawMetrics, &m)
-
- // threads
- m.ThreadsDaemon.Set(rawMetrics.FindByNames("jvm_threads_daemon", "jvm_threads_daemon_threads").Max())
- m.Threads.Set(rawMetrics.FindByNames("jvm_threads_live", "jvm_threads_live_threads").Max())
-
- // heap memory
- gatherHeap(rawMetrics.FindByName("jvm_memory_used_bytes"), &m.HeapUsed)
- gatherHeap(rawMetrics.FindByName("jvm_memory_committed_bytes"), &m.HeapCommitted)
- m.MemFree.Set(m.HeapCommitted.Sum() - m.HeapUsed.Sum())
-
- return stm.ToMap(m)
-}
-
-func gatherHeap(rawMetrics prometheus.Series, m *heap) {
- for _, metric := range rawMetrics {
- id := metric.Labels.Get("id")
- value := metric.Value
- switch {
- case strings.Contains(id, "Eden"):
- m.Eden.Set(value)
- case strings.Contains(id, "Survivor"):
- m.Survivor.Set(value)
- case strings.Contains(id, "Old") || strings.Contains(id, "Tenured"):
- m.Old.Set(value)
- }
- }
-}
-
-func (s *SpringBoot2) gatherResponse(rawMetrics prometheus.Series, m *metrics) {
- for _, metric := range rawMetrics.FindByName("http_server_requests_seconds_count") {
- if s.uriFilter != nil {
- uri := metric.Labels.Get("uri")
- if !s.uriFilter.MatchString(uri) {
- continue
- }
- }
-
- status := metric.Labels.Get("status")
- if status == "" {
- continue
- }
- value := metric.Value
- switch status[0] {
- case '1':
- m.Resp1xx.Add(value)
- case '2':
- m.Resp2xx.Add(value)
- case '3':
- m.Resp3xx.Add(value)
- case '4':
- m.Resp4xx.Add(value)
- case '5':
- m.Resp5xx.Add(value)
- }
- }
-}
-
-func (h heap) Sum() float64 {
- return h.Eden.Value() + h.Survivor.Value() + h.Old.Value()
-}
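
For reference, the removed collector's response accounting keyed off the first byte of the Prometheus status label, as seen in the deleted gatherResponse above. A minimal standalone sketch of that bucketing technique (hypothetical names, independent of the plugin):

    package main

    import "fmt"

    // bucketByStatusClass mirrors the switch in the deleted gatherResponse:
    // series without a status label are skipped, and the rest are grouped
    // into 1xx..5xx classes by the leading digit of the status code.
    func bucketByStatusClass(counts map[string]float64) map[string]float64 {
        classes := make(map[string]float64)
        for status, n := range counts {
            if status == "" {
                continue
            }
            switch status[0] {
            case '1', '2', '3', '4', '5':
                classes[string(status[0])+"xx"] += n
            }
        }
        return classes
    }

    func main() {
        fmt.Println(bucketByStatusClass(map[string]float64{"200": 19, "404": 4, "503": 1}))
        // map[2xx:19 4xx:4 5xx:1]
    }
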
diff --git a/modules/springboot2/springboot2_test.go b/modules/springboot2/springboot2_test.go
deleted file mode 100644
index 7198498d5..000000000
--- a/modules/springboot2/springboot2_test.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-package springboot2
-
-import (
- "net/http"
- "net/http/httptest"
- "os"
- "testing"
-
- "github.com/stretchr/testify/assert"
-)
-
-var (
- testdata, _ = os.ReadFile("tests/testdata.txt")
- testdata2, _ = os.ReadFile("tests/testdata2.txt")
-)
-
-func TestSpringboot2_Collect(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- switch r.URL.Path {
- case "/actuator/prometheus":
- _, _ = w.Write(testdata)
- case "/actuator/prometheus2":
- _, _ = w.Write(testdata2)
- }
- }))
- defer ts.Close()
- job1 := New()
- job1.HTTP.Request.URL = ts.URL + "/actuator/prometheus"
- assert.True(t, job1.Init())
- assert.True(t, job1.Check())
- assert.EqualValues(
- t,
- map[string]int64{
- "threads": 23,
- "threads_daemon": 21,
- "resp_1xx": 1,
- "resp_2xx": 19,
- "resp_3xx": 1,
- "resp_4xx": 4,
- "resp_5xx": 1,
- "heap_used_eden": 129649936,
- "heap_used_survivor": 8900136,
- "heap_used_old": 17827920,
- "heap_committed_eden": 153616384,
- "heap_committed_survivor": 8912896,
- "heap_committed_old": 40894464,
- "mem_free": 47045752,
- "uptime": 191730,
- },
- job1.Collect(),
- )
-
- job2 := New()
- job2.HTTP.Request.URL = ts.URL + "/actuator/prometheus2"
- assert.True(t, job2.Init())
- assert.True(t, job2.Check())
- assert.EqualValues(
- t,
- map[string]int64{
- "threads": 36,
- "threads_daemon": 22,
- "resp_1xx": 0,
- "resp_2xx": 57740,
- "resp_3xx": 0,
- "resp_4xx": 4,
- "resp_5xx": 0,
- "heap_used_eden": 18052960,
- "heap_used_survivor": 302704,
- "heap_used_old": 40122672,
- "heap_committed_eden": 21430272,
- "heap_committed_survivor": 2621440,
- "heap_committed_old": 53182464,
- "mem_free": 18755840,
- "uptime": 45501125,
- },
- job2.Collect(),
- )
-}
-
-func TestSpringboot2_404(t *testing.T) {
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
- w.WriteHeader(404)
- }))
- defer ts.Close()
- job := New()
- job.HTTP.Request.URL = ts.URL + "/actuator/prometheus"
-
- job.Init()
-
- assert.False(t, job.Check())
-
- job.Cleanup()
-}
-
-func TestSpringBoot2_Charts(t *testing.T) {
- job := New()
- charts := job.Charts()
-
- assert.True(t, charts.Has("response_codes"))
- assert.True(t, charts.Has("uptime"))
-}
diff --git a/modules/springboot2/tests/testdata.txt b/modules/springboot2/tests/testdata.txt
deleted file mode 100644
index 11c70e40d..000000000
--- a/modules/springboot2/tests/testdata.txt
+++ /dev/null
@@ -1,194 +0,0 @@
-# HELP tomcat_cache_access_total
-# TYPE tomcat_cache_access_total counter
-tomcat_cache_access_total 0.0
-# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC
-# TYPE jvm_gc_memory_promoted_bytes_total counter
-jvm_gc_memory_promoted_bytes_total 562080.0
-# HELP tomcat_cache_hit_total
-# TYPE tomcat_cache_hit_total counter
-tomcat_cache_hit_total 0.0
-# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC
-# TYPE jvm_gc_live_data_size_bytes gauge
-jvm_gc_live_data_size_bytes 0.0
-# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management
-# TYPE jvm_memory_max_bytes gauge
-jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8
-jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0
-jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9
-jvm_memory_max_bytes{area="heap",id="PS Eden Space",} 1.55189248E8
-jvm_memory_max_bytes{area="heap",id="PS Survivor Space",} 8912896.0
-jvm_memory_max_bytes{area="heap",id="PS Old Gen",} 3.49700096E8
-# HELP system_cpu_count The number of processors available to the Java virtual machine
-# TYPE system_cpu_count gauge
-system_cpu_count 2.0
-# HELP tomcat_global_request_seconds
-# TYPE tomcat_global_request_seconds summary
-tomcat_global_request_seconds_count{name="http-nio-8080",} 23.0
-tomcat_global_request_seconds_sum{name="http-nio-8080",} 1.205
-# HELP jvm_threads_daemon The current number of live daemon threads
-# TYPE jvm_threads_daemon gauge
-jvm_threads_daemon 21.0
-# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool
-# TYPE jvm_buffer_memory_used_bytes gauge
-jvm_buffer_memory_used_bytes{id="direct",} 81920.0
-jvm_buffer_memory_used_bytes{id="mapped",} 0.0
-# HELP jvm_buffer_count An estimate of the number of buffers in the pool
-# TYPE jvm_buffer_count gauge
-jvm_buffer_count{id="direct",} 10.0
-jvm_buffer_count{id="mapped",} 0.0
-# HELP tomcat_threads_current
-# TYPE tomcat_threads_current gauge
-tomcat_threads_current{name="http-nio-8080",} 10.0
-# HELP tomcat_sessions_created_total
-# TYPE tomcat_sessions_created_total counter
-tomcat_sessions_created_total 0.0
-# HELP system_cpu_usage The "recent cpu usage" for the whole system
-# TYPE system_cpu_usage gauge
-system_cpu_usage 0.03682658419046249
-# HELP tomcat_sessions_alive_max_seconds
-# TYPE tomcat_sessions_alive_max_seconds gauge
-tomcat_sessions_alive_max_seconds 0.0
-# HELP tomcat_servlet_error_total
-# TYPE tomcat_servlet_error_total counter
-tomcat_servlet_error_total{name="default",} 0.0
-# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time
-# TYPE system_load_average_1m gauge
-system_load_average_1m 0.2001953125
-# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool
-# TYPE jvm_gc_max_data_size_bytes gauge
-jvm_gc_max_data_size_bytes 0.0
-# HELP tomcat_sessions_expired_total
-# TYPE tomcat_sessions_expired_total counter
-tomcat_sessions_expired_total 0.0
-# HELP tomcat_sessions_rejected_total
-# TYPE tomcat_sessions_rejected_total counter
-tomcat_sessions_rejected_total 0.0
-# HELP process_start_time_seconds The start time of the Java virtual machine
-# TYPE process_start_time_seconds gauge
-process_start_time_seconds 1.544161580708E9
-# HELP jvm_threads_live The current number of live threads including both daemon and non-daemon threads
-# TYPE jvm_threads_live gauge
-jvm_threads_live 23.0
-# HELP jvm_classes_loaded The number of classes that are currently loaded in the Java virtual machine
-# TYPE jvm_classes_loaded gauge
-jvm_classes_loaded 7846.0
-# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next
-# TYPE jvm_gc_memory_allocated_bytes_total counter
-jvm_gc_memory_allocated_bytes_total 3.13524224E8
-# HELP process_uptime_seconds The uptime of the Java virtual machine
-# TYPE process_uptime_seconds gauge
-process_uptime_seconds 191.73
-# HELP tomcat_global_error_total
-# TYPE tomcat_global_error_total counter
-tomcat_global_error_total{name="http-nio-8080",} 4.0
-# HELP tomcat_threads_config_max
-# TYPE tomcat_threads_config_max gauge
-tomcat_threads_config_max{name="http-nio-8080",} 200.0
-# HELP jvm_threads_peak The peak live thread count since the Java virtual machine started or peak was reset
-# TYPE jvm_threads_peak gauge
-jvm_threads_peak 25.0
-# HELP jvm_classes_unloaded_total The total number of classes unloaded since the Java virtual machine has started execution
-# TYPE jvm_classes_unloaded_total counter
-jvm_classes_unloaded_total 0.0
-# HELP process_files_max The maximum file descriptor count
-# TYPE process_files_max gauge
-process_files_max 1048576.0
-# HELP tomcat_servlet_request_max_seconds
-# TYPE tomcat_servlet_request_max_seconds gauge
-tomcat_servlet_request_max_seconds{name="default",} 0.0
-# HELP tomcat_sessions_active_max
-# TYPE tomcat_sessions_active_max gauge
-tomcat_sessions_active_max 0.0
-# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use
-# TYPE jvm_memory_committed_bytes gauge
-jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 1.3369344E7
-jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 4.390912E7
-jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 5636096.0
-jvm_memory_committed_bytes{area="heap",id="PS Eden Space",} 1.53616384E8
-jvm_memory_committed_bytes{area="heap",id="PS Survivor Space",} 8912896.0
-jvm_memory_committed_bytes{area="heap",id="PS Old Gen",} 4.0894464E7
-# HELP tomcat_servlet_request_seconds
-# TYPE tomcat_servlet_request_seconds summary
-tomcat_servlet_request_seconds_count{name="default",} 0.0
-tomcat_servlet_request_seconds_sum{name="default",} 0.0
-# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool
-# TYPE jvm_buffer_total_capacity_bytes gauge
-jvm_buffer_total_capacity_bytes{id="direct",} 81920.0
-jvm_buffer_total_capacity_bytes{id="mapped",} 0.0
-# HELP tomcat_global_received_bytes_total
-# TYPE tomcat_global_received_bytes_total counter
-tomcat_global_received_bytes_total{name="http-nio-8080",} 0.0
-# HELP jvm_gc_pause_seconds Time spent in GC pause
-# TYPE jvm_gc_pause_seconds summary
-jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 2.0
-jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 0.06
-# HELP jvm_gc_pause_seconds_max Time spent in GC pause
-# TYPE jvm_gc_pause_seconds_max gauge
-jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.0
-# HELP process_files_open The open file descriptor count
-# TYPE process_files_open gauge
-process_files_open 29.0
-# HELP tomcat_global_sent_bytes_total
-# TYPE tomcat_global_sent_bytes_total counter
-tomcat_global_sent_bytes_total{name="http-nio-8080",} 63044.0
-# HELP tomcat_threads_busy
-# TYPE tomcat_threads_busy gauge
-tomcat_threads_busy{name="http-nio-8080",} 1.0
-# HELP tomcat_global_request_max_seconds
-# TYPE tomcat_global_request_max_seconds gauge
-tomcat_global_request_max_seconds{name="http-nio-8080",} 0.282
-# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process
-# TYPE process_cpu_usage gauge
-process_cpu_usage 0.019132561317701215
-# HELP jvm_memory_used_bytes The amount of used memory
-# TYPE jvm_memory_used_bytes gauge
-jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 1.3269376E7
-jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 4.1364704E7
-jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 5125872.0
-jvm_memory_used_bytes{area="heap",id="PS Eden Space",} 1.29649936E8
-jvm_memory_used_bytes{area="heap",id="PS Survivor Space",} 8900136.0
-jvm_memory_used_bytes{area="heap",id="PS Old Gen",} 1.782792E7
-# HELP logback_events_total Number of error level events that made it to the logs
-# TYPE logback_events_total counter
-logback_events_total{level="error",} 0.0
-logback_events_total{level="warn",} 0.0
-logback_events_total{level="info",} 41.0
-logback_events_total{level="debug",} 0.0
-logback_events_total{level="trace",} 0.0
-# HELP tomcat_sessions_active_current
-# TYPE tomcat_sessions_active_current gauge
-tomcat_sessions_active_current 0.0
-# HELP http_server_requests_seconds
-# TYPE http_server_requests_seconds summary
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 6.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.2367162
-http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/**",} 3.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/**",} 0.0516521
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 5.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0587843
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/hello",} 4.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/hello",} 0.0470746
-http_server_requests_seconds_count{exception="None",method="GET",status="102",uri="/hello",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="102",uri="/hello",} 0.0470746
-http_server_requests_seconds_count{exception="None",method="GET",status="302",uri="/hello",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="302",uri="/hello",} 0.0470746
-http_server_requests_seconds_count{exception="None",method="GET",status="503",uri="/hello",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="503",uri="/hello",} 0.0470746
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/",} 2.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/",} 0.1888718
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/health",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562
-http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837
-http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/metrics",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195
-# HELP http_server_requests_seconds_max
-# TYPE http_server_requests_seconds_max gauge
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.1311382
-http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/**",} 0.031655
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0449076
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/hello",} 0.0248288
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/",} 0.1840505
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562
-http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837
-http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195
\ No newline at end of file
diff --git a/modules/springboot2/tests/testdata2.txt b/modules/springboot2/tests/testdata2.txt
deleted file mode 100644
index 78bbdf5cd..000000000
--- a/modules/springboot2/tests/testdata2.txt
+++ /dev/null
@@ -1,193 +0,0 @@
-# HELP jvm_classes_loaded_classes The number of classes that are currently loaded in the Java virtual machine
-# TYPE jvm_classes_loaded_classes gauge
-jvm_classes_loaded_classes 12360.0
-# HELP process_files_open_files The open file descriptor count
-# TYPE process_files_open_files gauge
-process_files_open_files 46.0
-# HELP jvm_memory_used_bytes The amount of used memory
-# TYPE jvm_memory_used_bytes gauge
-jvm_memory_used_bytes{area="heap",id="Tenured Gen",} 4.0122672E7
-jvm_memory_used_bytes{area="heap",id="Eden Space",} 1.805296E7
-jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 6.6824752E7
-jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.6224704E7
-jvm_memory_used_bytes{area="heap",id="Survivor Space",} 302704.0
-jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 8236936.0
-# HELP system_cpu_count The number of processors available to the Java virtual machine
-# TYPE system_cpu_count gauge
-system_cpu_count 1.0
-# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process
-# TYPE process_cpu_usage gauge
-process_cpu_usage 0.0
-# HELP tomcat_sessions_alive_max_seconds
-# TYPE tomcat_sessions_alive_max_seconds gauge
-tomcat_sessions_alive_max_seconds 0.0
-# HELP tomcat_global_sent_bytes_total
-# TYPE tomcat_global_sent_bytes_total counter
-tomcat_global_sent_bytes_total{name="http-nio-17001",} 7.06007212E8
-# HELP jvm_threads_states_threads The current number of threads having NEW state
-# TYPE jvm_threads_states_threads gauge
-jvm_threads_states_threads{state="runnable",} 10.0
-jvm_threads_states_threads{state="blocked",} 0.0
-jvm_threads_states_threads{state="waiting",} 22.0
-jvm_threads_states_threads{state="timed-waiting",} 4.0
-jvm_threads_states_threads{state="new",} 0.0
-jvm_threads_states_threads{state="terminated",} 0.0
-# HELP process_start_time_seconds Start time of the process since unix epoch.
-# TYPE process_start_time_seconds gauge
-process_start_time_seconds 1.552476492313E9
-# HELP tomcat_sessions_active_max_sessions
-# TYPE tomcat_sessions_active_max_sessions gauge
-tomcat_sessions_active_max_sessions 0.0
-# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC
-# TYPE jvm_gc_live_data_size_bytes gauge
-jvm_gc_live_data_size_bytes 3.1908592E7
-# HELP spring_integration_channels The number of message channels
-# TYPE spring_integration_channels gauge
-spring_integration_channels 6.0
-# HELP system_cpu_usage The "recent cpu usage" for the whole system
-# TYPE system_cpu_usage gauge
-system_cpu_usage 0.047619047619047616
-# HELP jvm_classes_unloaded_classes_total The total number of classes unloaded since the Java virtual machine has started execution
-# TYPE jvm_classes_unloaded_classes_total counter
-jvm_classes_unloaded_classes_total 0.0
-# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management
-# TYPE jvm_memory_max_bytes gauge
-jvm_memory_max_bytes{area="heap",id="Tenured Gen",} 6.61323776E8
-jvm_memory_max_bytes{area="heap",id="Eden Space",} 2.64568832E8
-jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0
-jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8
-jvm_memory_max_bytes{area="heap",id="Survivor Space",} 3.3030144E7
-jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9
-# HELP logback_events_total Number of error level events that made it to the logs
-# TYPE logback_events_total counter
-logback_events_total{level="warn",} 1.0
-logback_events_total{level="debug",} 0.0
-logback_events_total{level="error",} 0.0
-logback_events_total{level="trace",} 0.0
-logback_events_total{level="info",} 30.0
-# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool
-# TYPE jvm_gc_max_data_size_bytes gauge
-jvm_gc_max_data_size_bytes 6.61323776E8
-# HELP tomcat_sessions_created_sessions_total
-# TYPE tomcat_sessions_created_sessions_total counter
-tomcat_sessions_created_sessions_total 0.0
-# HELP process_files_max_files The maximum file descriptor count
-# TYPE process_files_max_files gauge
-process_files_max_files 1006500.0
-# HELP spring_integration_sources The number of message sources
-# TYPE spring_integration_sources gauge
-spring_integration_sources 5.0
-# HELP tomcat_global_request_seconds
-# TYPE tomcat_global_request_seconds summary
-tomcat_global_request_seconds_count{name="http-nio-17001",} 57744.0
-tomcat_global_request_seconds_sum{name="http-nio-17001",} 113.513
-# HELP tomcat_sessions_active_current_sessions
-# TYPE tomcat_sessions_active_current_sessions gauge
-tomcat_sessions_active_current_sessions 0.0
-# HELP tomcat_global_error_total
-# TYPE tomcat_global_error_total counter
-tomcat_global_error_total{name="http-nio-17001",} 0.0
-# HELP jvm_threads_daemon_threads The current number of live daemon threads
-# TYPE jvm_threads_daemon_threads gauge
-jvm_threads_daemon_threads 22.0
-# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next
-# TYPE jvm_gc_memory_allocated_bytes_total counter
-jvm_gc_memory_allocated_bytes_total 2.7071024304E10
-# HELP http_server_requests_seconds
-# TYPE http_server_requests_seconds summary
-http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 57717.0
-http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 108.648599202
-http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 13.0
-http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 2.504856475
-http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 1.0
-http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 5.959808087
-http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 9.0
-http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0506538
-http_server_requests_seconds_count{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 4.0
-http_server_requests_seconds_sum{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.00875155
-# HELP http_server_requests_seconds_max
-# TYPE http_server_requests_seconds_max gauge
-http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 0.007270684
-http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 0.0
-http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 0.0
-http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0
-http_server_requests_seconds_max{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.0
-# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool
-# TYPE jvm_buffer_total_capacity_bytes gauge
-jvm_buffer_total_capacity_bytes{id="direct",} 278528.0
-jvm_buffer_total_capacity_bytes{id="mapped",} 0.0
-# HELP spring_integration_handlers The number of message handlers
-# TYPE spring_integration_handlers gauge
-spring_integration_handlers 5.0
-# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC
-# TYPE jvm_gc_memory_promoted_bytes_total counter
-jvm_gc_memory_promoted_bytes_total 2.4583704E7
-# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool
-# TYPE jvm_buffer_count_buffers gauge
-jvm_buffer_count_buffers{id="direct",} 15.0
-jvm_buffer_count_buffers{id="mapped",} 0.0
-# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use
-# TYPE jvm_memory_committed_bytes gauge
-jvm_memory_committed_bytes{area="heap",id="Tenured Gen",} 5.3182464E7
-jvm_memory_committed_bytes{area="heap",id="Eden Space",} 2.1430272E7
-jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 7.0803456E7
-jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 2.6804224E7
-jvm_memory_committed_bytes{area="heap",id="Survivor Space",} 2621440.0
-jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 8953856.0
-# HELP tomcat_global_request_max_seconds
-# TYPE tomcat_global_request_max_seconds gauge
-tomcat_global_request_max_seconds{name="http-nio-17001",} 6.049
-# HELP process_uptime_seconds The uptime of the Java virtual machine
-# TYPE process_uptime_seconds gauge
-process_uptime_seconds 45501.125
-# HELP tomcat_threads_config_max_threads
-# TYPE tomcat_threads_config_max_threads gauge
-tomcat_threads_config_max_threads{name="http-nio-17001",} 200.0
-# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool
-# TYPE jvm_buffer_memory_used_bytes gauge
-jvm_buffer_memory_used_bytes{id="direct",} 278529.0
-jvm_buffer_memory_used_bytes{id="mapped",} 0.0
-# HELP http_client_requests_seconds Timer of WebClient operation
-# TYPE http_client_requests_seconds summary
-http_client_requests_seconds_count{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 1.0
-http_client_requests_seconds_sum{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 2.258042154
-http_client_requests_seconds_count{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 2.0
-http_client_requests_seconds_sum{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.305785165
-# HELP http_client_requests_seconds_max Timer of WebClient operation
-# TYPE http_client_requests_seconds_max gauge
-http_client_requests_seconds_max{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 0.0
-http_client_requests_seconds_max{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.0
-# HELP tomcat_global_received_bytes_total
-# TYPE tomcat_global_received_bytes_total counter
-tomcat_global_received_bytes_total{name="http-nio-17001",} 0.0
-# HELP jvm_threads_peak_threads The peak live thread count since the Java virtual machine started or peak was reset
-# TYPE jvm_threads_peak_threads gauge
-jvm_threads_peak_threads 36.0
-# HELP jvm_threads_live_threads The current number of live threads including both daemon and non-daemon threads
-# TYPE jvm_threads_live_threads gauge
-jvm_threads_live_threads 36.0
-# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time
-# TYPE system_load_average_1m gauge
-system_load_average_1m 0.02
-# HELP tomcat_threads_current_threads
-# TYPE tomcat_threads_current_threads gauge
-tomcat_threads_current_threads{name="http-nio-17001",} 10.0
-# HELP tomcat_sessions_expired_sessions_total
-# TYPE tomcat_sessions_expired_sessions_total counter
-tomcat_sessions_expired_sessions_total 0.0
-# HELP tomcat_sessions_rejected_sessions_total
-# TYPE tomcat_sessions_rejected_sessions_total counter
-tomcat_sessions_rejected_sessions_total 0.0
-# HELP jvm_gc_pause_seconds Time spent in GC pause
-# TYPE jvm_gc_pause_seconds summary
-jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0
-jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.1
-jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 1269.0
-jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 5.909
-# HELP jvm_gc_pause_seconds_max Time spent in GC pause
-# TYPE jvm_gc_pause_seconds_max gauge
-jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0
-jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.004
-# HELP tomcat_threads_busy_threads
-# TYPE tomcat_threads_busy_threads gauge
-tomcat_threads_busy_threads{name="http-nio-17001",} 1.0
\ No newline at end of file
diff --git a/modules/squidlog/collect.go b/modules/squidlog/collect.go
index 20d3f86e8..bafa6d4cc 100644
--- a/modules/squidlog/collect.go
+++ b/modules/squidlog/collect.go
@@ -14,7 +14,7 @@ import (
"github.com/netdata/go.d.plugin/agent/module"
)
-func (s SquidLog) logPanicStackIfAny() {
+func (s *SquidLog) logPanicStackIfAny() {
err := recover()
if err == nil {
return
diff --git a/modules/squidlog/config_schema.json b/modules/squidlog/config_schema.json
index dcf439c70..bdbf94c5b 100644
--- a/modules/squidlog/config_schema.json
+++ b/modules/squidlog/config_schema.json
@@ -1,101 +1,194 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/squid_log job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "minimum": 1,
+ "default": 1,
+ "type": "integer"
+ },
+ "path": {
+ "title": "Log file path",
+ "description": "The file path to the Squid server log file.",
+ "type": "string",
+ "default": "/var/log/squid/access.log"
+ },
+ "exclude_path": {
+ "title": "Exclude path",
+ "description": "Pattern to exclude log files.",
+ "type": "string",
+ "default": "*.gz"
+ },
+ "log_type": {
+ "title": "Log parser",
+ "description": "Type of parser to use for parsing the Squid server log file.",
+ "type": "string",
+ "enum": [
+ "csv",
+ "regexp",
+ "json",
+ "ltsv"
+ ],
+ "default": "csv"
+ }
},
- "parser": {
- "type": "object",
- "properties": {
- "log_type": {
- "type": "string"
- },
- "csv_config": {
- "type": "object",
- "properties": {
- "fields_per_record": {
- "type": "integer"
- },
- "delimiter": {
- "type": "string"
- },
- "trim_leading_space": {
- "type": "boolean"
+ "required": [
+ "path",
+ "log_type"
+ ],
+ "dependencies": {
+ "log_type": {
+ "oneOf": [
+ {
+ "properties": {
+ "log_type": {
+ "const": "csv"
+ },
+ "csv_config": {
+ "title": "CSV parser configuration",
+ "type": "object",
+ "properties": {
+ "format": {
+ "title": "Format",
+ "description": "Log format.",
+ "type": "string",
+ "default": "$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent"
+ },
+ "delimiter": {
+ "title": "Delimiter",
+ "description": "Delimiter used to separate fields in the log file. Default: space (' ').",
+ "type": "string",
+ "default": " "
+ }
+ },
+ "required": [
+ "format",
+ "delimiter"
+ ]
+ }
},
- "format": {
- "type": "string"
- }
+ "required": [
+ "csv_config"
+ ]
},
- "required": [
- "fields_per_record",
- "delimiter",
- "trim_leading_space",
- "format"
- ]
- },
- "ltsv_config": {
- "type": "object",
- "properties": {
- "field_delimiter": {
- "type": "string"
- },
- "value_delimiter": {
- "type": "string"
+ {
+ "properties": {
+ "log_type": {
+ "const": "regexp"
+ },
+ "regexp_config": {
+ "title": "Regular expression parser configuration",
+ "type": "object",
+ "properties": {
+ "pattern": {
+ "title": "Pattern with named groups",
+ "description": "Regular expression pattern with named groups. Use named groups for known fields.",
+ "type": "string",
+ "default": ""
+ }
+ },
+ "required": [
+ "pattern"
+ ]
+ }
},
- "mapping": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
+ "required": [
+ "regexp_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "json"
+ },
+ "json_config": {
+ "title": "JSON parser configuration",
+ "type": "object",
+ "properties": {
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
}
}
},
- "required": [
- "field_delimiter",
- "value_delimiter",
- "mapping"
- ]
- },
- "regexp_config": {
- "type": "object",
- "properties": {
- "pattern": {
- "type": "string"
+ {
+ "properties": {
+ "log_type": {
+ "const": "ltsv"
+ },
+ "ltsv_config": {
+ "title": "LTSV parser configuration",
+ "type": "object",
+ "properties": {
+ "field_delimiter": {
+ "title": "Field delimiter",
+ "description": "Delimiter used to separate fields in LTSV logs. Default: tab ('\\t').",
+ "type": "string",
+ "default": "\t"
+ },
+ "value_delimiter": {
+ "title": "Value delimiter",
+ "description": "Delimiter used to separate label-value pairs in LTSV logs.",
+ "type": "string",
+ "default": ":"
+ },
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
}
- },
- "required": [
- "pattern"
+ }
+ ]
+ }
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ },
+ "log_type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
+ }
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "path",
+ "exclude_path"
]
},
- "json_config": {
- "type": "object",
- "properties": {
- "mapping": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
- }
- },
- "required": [
- "mapping"
+ {
+ "title": "Parser",
+ "fields": [
+ "log_type",
+ "csv_config",
+ "ltsv_config",
+ "regexp_config",
+ "json_config"
]
}
- },
- "required": [
- "log_type"
]
- },
- "path": {
- "type": "string"
- },
- "exclude_path": {
- "type": "string"
}
- },
- "required": [
- "name",
- "path"
- ]
+ }
}
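
Rendered as a go.d job configuration, the reworked schema above corresponds to roughly the following (illustrative values; the format string is the module's own default from New() later in this patch, rather than the schema's generic default):

    jobs:
      - name: squidlog
        path: /var/log/squid/access.log
        exclude_path: '*.gz'
        log_type: csv
        csv_config:
          format: '- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type'
          delimiter: ' '
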
diff --git a/modules/squidlog/init.go b/modules/squidlog/init.go
index 60c2c4586..da6082c0f 100644
--- a/modules/squidlog/init.go
+++ b/modules/squidlog/init.go
@@ -34,7 +34,7 @@ func (s *SquidLog) createParser() error {
lastLine = bytes.TrimRight(lastLine, "\n")
s.Debugf("last line: '%s'", string(lastLine))
- s.parser, err = logs.NewParser(s.Parser, s.file)
+ s.parser, err = logs.NewParser(s.ParserConfig, s.file)
if err != nil {
return fmt.Errorf("create parser: %v", err)
}
diff --git a/modules/squidlog/squidlog.go b/modules/squidlog/squidlog.go
index 704bc9627..738e37f8f 100644
--- a/modules/squidlog/squidlog.go
+++ b/modules/squidlog/squidlog.go
@@ -20,68 +20,72 @@ func init() {
}
func New() *SquidLog {
- cfg := logs.ParserConfig{
- LogType: logs.TypeCSV,
- CSV: logs.CSVConfig{
- FieldsPerRecord: -1,
- Delimiter: " ",
- TrimLeadingSpace: true,
- Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type",
- CheckField: checkCSVFormatField,
- },
- }
return &SquidLog{
Config: Config{
Path: "/var/log/squid/access.log",
ExcludePath: "*.gz",
- Parser: cfg,
+ ParserConfig: logs.ParserConfig{
+ LogType: logs.TypeCSV,
+ CSV: logs.CSVConfig{
+ FieldsPerRecord: -1,
+ Delimiter: " ",
+ TrimLeadingSpace: true,
+ Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type",
+ CheckField: checkCSVFormatField,
+ },
+ },
},
}
}
-type (
- Config struct {
- Parser logs.ParserConfig `yaml:",inline"`
- Path string `yaml:"path"`
- ExcludePath string `yaml:"exclude_path"`
- }
+type Config struct {
+ logs.ParserConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Path string `yaml:"path" json:"path"`
+ ExcludePath string `yaml:"exclude_path" json:"exclude_path"`
+}
- SquidLog struct {
- module.Base
- Config `yaml:",inline"`
+type SquidLog struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- file *logs.Reader
- parser logs.Parser
- line *logLine
+ charts *module.Charts
- mx *metricsData
- charts *module.Charts
- }
-)
+ file *logs.Reader
+ parser logs.Parser
+ line *logLine
-func (s *SquidLog) Init() bool {
+ mx *metricsData
+}
+
+func (s *SquidLog) Configuration() any {
+ return s.Config
+}
+
+func (s *SquidLog) Init() error {
s.line = newEmptyLogLine()
s.mx = newMetricsData()
- return true
+ return nil
}
-func (s *SquidLog) Check() bool {
+func (s *SquidLog) Check() error {
	// Note: these inits are here to make auto-detection retries work
if err := s.createLogReader(); err != nil {
s.Warning("check failed: ", err)
- return false
+ return err
}
if err := s.createParser(); err != nil {
s.Warning("check failed: ", err)
- return false
+ return err
}
if err := s.createCharts(s.line); err != nil {
s.Warning("check failed: ", err)
- return false
+ return err
}
- return true
+
+ return nil
}
func (s *SquidLog) Charts() *module.Charts {
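
Note the contract change in this file: Init and Check now return error instead of bool, so the concrete failure reason reaches the caller instead of a bare false. A minimal, illustrative consumer of the new shape (the interface here is a simplification, not the agent's actual module API):

    package modulecheck

    import "fmt"

    // collector is a deliberately simplified stand-in for the module interface.
    type collector interface {
        Init() error
        Check() error
    }

    // ready runs the error-returning lifecycle and propagates the first failure,
    // which a bool-returning contract could not surface.
    func ready(c collector) error {
        if err := c.Init(); err != nil {
            return fmt.Errorf("init: %w", err)
        }
        if err := c.Check(); err != nil {
            return fmt.Errorf("check: %w", err)
        }
        return nil
    }
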
diff --git a/modules/squidlog/squidlog_test.go b/modules/squidlog/squidlog_test.go
index c6d818bf9..2ed2d2987 100644
--- a/modules/squidlog/squidlog_test.go
+++ b/modules/squidlog/squidlog_test.go
@@ -16,11 +16,24 @@ import (
)
var (
- nativeFormatAccessLog, _ = os.ReadFile("testdata/access.log")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataNativeFormatAccessLog, _ = os.ReadFile("testdata/access.log")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, nativeFormatAccessLog)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataNativeFormatAccessLog": dataNativeFormatAccessLog,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSquidLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SquidLog{}, dataConfigJSON, dataConfigYAML)
}
func TestNew(t *testing.T) {
@@ -30,7 +43,7 @@ func TestNew(t *testing.T) {
func TestSquidLog_Init(t *testing.T) {
squidlog := New()
- assert.True(t, squidlog.Init())
+ assert.NoError(t, squidlog.Init())
}
func TestSquidLog_Check(t *testing.T) {
@@ -40,28 +53,28 @@ func TestSquidLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) {
squid := New()
defer squid.Cleanup()
squid.Path = "testdata/not_exists.log"
- require.True(t, squid.Init())
+ require.NoError(t, squid.Init())
- assert.False(t, squid.Check())
+ assert.Error(t, squid.Check())
}
func TestSquid_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) {
squid := New()
defer squid.Cleanup()
squid.Path = "testdata/unknown.log"
- require.True(t, squid.Init())
+ require.NoError(t, squid.Init())
- assert.False(t, squid.Check())
+ assert.Error(t, squid.Check())
}
func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) {
squid := New()
defer squid.Cleanup()
squid.Path = "testdata/access.log"
- squid.Parser.CSV.Format = "$one $two"
- require.True(t, squid.Init())
+ squid.ParserConfig.CSV.Format = "$one $two"
+ require.NoError(t, squid.Init())
- assert.False(t, squid.Check())
+ assert.Error(t, squid.Check())
}
func TestSquidLog_Charts(t *testing.T) {
@@ -280,11 +293,11 @@ func prepareSquidCollect(t *testing.T) *SquidLog {
t.Helper()
squid := New()
squid.Path = "testdata/access.log"
- require.True(t, squid.Init())
- require.True(t, squid.Check())
+ require.NoError(t, squid.Init())
+ require.NoError(t, squid.Check())
defer squid.Cleanup()
- p, err := logs.NewCSVParser(squid.Parser.CSV, bytes.NewReader(nativeFormatAccessLog))
+ p, err := logs.NewCSVParser(squid.ParserConfig.CSV, bytes.NewReader(dataNativeFormatAccessLog))
require.NoError(t, err)
squid.parser = p
return squid
diff --git a/modules/squidlog/testdata/config.json b/modules/squidlog/testdata/config.json
new file mode 100644
index 000000000..5d563cc7e
--- /dev/null
+++ b/modules/squidlog/testdata/config.json
@@ -0,0 +1,27 @@
+{
+ "update_every": 123,
+ "path": "ok",
+ "exclude_path": "ok",
+ "log_type": "ok",
+ "csv_config": {
+ "fields_per_record": 123,
+ "delimiter": "ok",
+ "trim_leading_space": true,
+ "format": "ok"
+ },
+ "ltsv_config": {
+ "field_delimiter": "ok",
+ "value_delimiter": "ok",
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "regexp_config": {
+ "pattern": "ok"
+ },
+ "json_config": {
+ "mapping": {
+ "ok": "ok"
+ }
+ }
+}
diff --git a/modules/squidlog/testdata/config.yaml b/modules/squidlog/testdata/config.yaml
new file mode 100644
index 000000000..701205e23
--- /dev/null
+++ b/modules/squidlog/testdata/config.yaml
@@ -0,0 +1,19 @@
+update_every: 123
+path: "ok"
+exclude_path: "ok"
+log_type: "ok"
+csv_config:
+ fields_per_record: 123
+ delimiter: "ok"
+ trim_leading_space: yes
+ format: "ok"
+ltsv_config:
+ field_delimiter: "ok"
+ value_delimiter: "ok"
+ mapping:
+ ok: "ok"
+regexp_config:
+ pattern: "ok"
+json_config:
+ mapping:
+ ok: "ok"
diff --git a/modules/supervisord/config_schema.json b/modules/supervisord/config_schema.json
index d3617c94a..e53004ebb 100644
--- a/modules/supervisord/config_schema.json
+++ b/modules/supervisord/config_schema.json
@@ -1,21 +1,82 @@
{
- "$id": "https://example.com/person.schema.json",
- "$schema": "https://json-schema.org/draft/2020-12/schema",
- "title": "Supervisord collector job configuration",
- "type": "object",
- "properties": {
- "firstName": {
- "type": "string",
- "description": "The person's first name."
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Supervisord collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Supervisord XML-RPC interface.",
+ "type": "string",
+ "default": "http://127.0.0.1:9001/RPC2"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "lastName": {
- "type": "string",
- "description": "The person's last name."
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "age": {
- "description": "Age in years which must be equal to or greater than zero.",
- "type": "integer",
- "minimum": 0
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
}
}
}
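
In job-config form, the schema above maps to something like this sketch (url and timeout are the schema defaults; tls_skip_verify is shown only for completeness):

    jobs:
      - name: supervisord
        url: http://127.0.0.1:9001/RPC2
        timeout: 1
        tls_skip_verify: no
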
diff --git a/modules/supervisord/init.go b/modules/supervisord/init.go
index 0c5285c3b..1c401bcd6 100644
--- a/modules/supervisord/init.go
+++ b/modules/supervisord/init.go
@@ -10,14 +10,14 @@ import (
"github.com/netdata/go.d.plugin/pkg/web"
)
-func (s Supervisord) verifyConfig() error {
+func (s *Supervisord) verifyConfig() error {
if s.URL == "" {
return errors.New("'url' not set")
}
return nil
}
-func (s Supervisord) initSupervisorClient() (supervisorClient, error) {
+func (s *Supervisord) initSupervisorClient() (supervisorClient, error) {
u, err := url.Parse(s.URL)
if err != nil {
return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL)
diff --git a/modules/supervisord/supervisord.go b/modules/supervisord/supervisord.go
index 1c9994710..84096808d 100644
--- a/modules/supervisord/supervisord.go
+++ b/modules/supervisord/supervisord.go
@@ -4,6 +4,7 @@ package supervisord
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -25,7 +26,7 @@ func New() *Supervisord {
Config: Config{
URL: "http://127.0.0.1:9001/RPC2",
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
@@ -35,18 +36,20 @@ func New() *Supervisord {
}
type Config struct {
- URL string `yaml:"url"`
- web.Client `yaml:",inline"`
+ web.Client `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ URL string `yaml:"url" json:"url"`
}
type (
Supervisord struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- client supervisorClient
charts *module.Charts
+ client supervisorClient
+
cache map[string]map[string]bool // map[group][procName]collected
}
supervisorClient interface {
@@ -55,25 +58,37 @@ type (
}
)
-func (s *Supervisord) Init() bool {
+func (s *Supervisord) Configuration() any {
+ return s.Config
+}
+
+func (s *Supervisord) Init() error {
err := s.verifyConfig()
if err != nil {
s.Errorf("verify config: %v", err)
- return false
+ return err
}
client, err := s.initSupervisorClient()
if err != nil {
s.Errorf("init supervisord client: %v", err)
- return false
+ return err
}
s.client = client
- return true
+ return nil
}
-func (s *Supervisord) Check() bool {
- return len(s.Collect()) > 0
+func (s *Supervisord) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (s *Supervisord) Charts() *module.Charts {
diff --git a/modules/supervisord/supervisord_test.go b/modules/supervisord/supervisord_test.go
index 23ef1ff0c..6e107f629 100644
--- a/modules/supervisord/supervisord_test.go
+++ b/modules/supervisord/supervisord_test.go
@@ -4,14 +4,31 @@ package supervisord
import (
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Supervisord)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSupervisord_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Supervisord{}, dataConfigJSON, dataConfigYAML)
}
func TestSupervisord_Init(t *testing.T) {
@@ -38,9 +55,9 @@ func TestSupervisord_Init(t *testing.T) {
supvr.Config = test.config
if test.wantFail {
- assert.False(t, supvr.Init())
+ assert.Error(t, supvr.Init())
} else {
- assert.True(t, supvr.Init())
+ assert.NoError(t, supvr.Init())
}
})
}
@@ -69,9 +86,9 @@ func TestSupervisord_Check(t *testing.T) {
defer supvr.Cleanup()
if test.wantFail {
- assert.False(t, supvr.Check())
+ assert.Error(t, supvr.Check())
} else {
- assert.True(t, supvr.Check())
+ assert.NoError(t, supvr.Check())
}
})
}
@@ -79,7 +96,7 @@ func TestSupervisord_Check(t *testing.T) {
func TestSupervisord_Charts(t *testing.T) {
supvr := New()
- require.True(t, supvr.Init())
+ require.NoError(t, supvr.Init())
assert.NotNil(t, supvr.Charts())
}
@@ -88,7 +105,7 @@ func TestSupervisord_Cleanup(t *testing.T) {
supvr := New()
assert.NotPanics(t, supvr.Cleanup)
- require.True(t, supvr.Init())
+ require.NoError(t, supvr.Init())
m := &mockSupervisorClient{}
supvr.client = m
@@ -188,21 +205,21 @@ func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) {
func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord {
supvr := New()
- require.True(t, supvr.Init())
+ require.NoError(t, supvr.Init())
supvr.client = &mockSupervisorClient{}
return supvr
}
func prepareSupervisordZeroProcessesOnGetAllProcessInfo(t *testing.T) *Supervisord {
supvr := New()
- require.True(t, supvr.Init())
+ require.NoError(t, supvr.Init())
supvr.client = &mockSupervisorClient{returnZeroProcesses: true}
return supvr
}
func prepareSupervisordErrorOnGetAllProcessInfo(t *testing.T) *Supervisord {
supvr := New()
- require.True(t, supvr.Init())
+ require.NoError(t, supvr.Init())
supvr.client = &mockSupervisorClient{errOnGetAllProcessInfo: true}
return supvr
}
diff --git a/modules/supervisord/testdata/config.json b/modules/supervisord/testdata/config.json
new file mode 100644
index 000000000..825b0c394
--- /dev/null
+++ b/modules/supervisord/testdata/config.json
@@ -0,0 +1,11 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "proxy_url": "ok",
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/supervisord/testdata/config.yaml b/modules/supervisord/testdata/config.yaml
new file mode 100644
index 000000000..e1a01abd7
--- /dev/null
+++ b/modules/supervisord/testdata/config.yaml
@@ -0,0 +1,9 @@
+update_every: 123
+url: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+proxy_url: "ok"
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/systemdunits/collect.go b/modules/systemdunits/collect.go
index 2843a4230..eb596605f 100644
--- a/modules/systemdunits/collect.go
+++ b/modules/systemdunits/collect.go
@@ -148,7 +148,7 @@ func (s *SystemdUnits) getSystemdVersion(conn systemdConnection) (int, error) {
}
func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus, error) {
- ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
defer cancel()
s.Debugf("calling function 'ListUnits'")
@@ -169,7 +169,7 @@ func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus
}
func (s *SystemdUnits) getLoadedUnitsByPatterns(conn systemdConnection) ([]dbus.UnitStatus, error) {
- ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration)
+ ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration())
defer cancel()
s.Debugf("calling function 'ListUnitsByPatterns'")
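
The field-to-method change here (s.Timeout.Duration → s.Timeout.Duration()) pairs with the constructor change elsewhere in this patch (web.Duration{Duration: t} → web.Duration(t)), suggesting web.Duration is now a defined type over time.Duration rather than a struct wrapping one. A sketch of that assumed shape:

    package web

    import "time"

    // Assumed shape only: a named type over time.Duration, so callers
    // convert in with web.Duration(t) and read back via Duration().
    type Duration time.Duration

    func (d Duration) Duration() time.Duration { return time.Duration(d) }
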
diff --git a/modules/systemdunits/config_schema.json b/modules/systemdunits/config_schema.json
index 5a9df2571..72641454e 100644
--- a/modules/systemdunits/config_schema.json
+++ b/modules/systemdunits/config_schema.json
@@ -1,27 +1,51 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/systemdunits job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "include": {
- "type": "array",
- "items": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Systemdunits collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The frequency, in seconds, at which data is collected from systemd.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 10
},
- "minItems": 1
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for connecting and querying systemd's D-Bus endpoint.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 2
+ },
+ "include": {
+ "title": "Include",
+ "description": "Configuration for monitoring specific systemd units. Include systemd units whose names match any of the specified patterns. Patterns follow the syntax of shell file name patterns.",
+ "type": "array",
+ "uniqueItems": true,
+ "minItems": 1,
+ "items": {
+ "title": "Unit pattern",
+ "type": "string"
+ },
+ "default": [
+ "*.service"
+ ]
+ }
+ },
+ "required": [
+ "include"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
+ },
+ "include": {
+ "ui:listFlavour": "list"
}
- },
- "required": [
- "name",
- "include"
- ]
-}
\ No newline at end of file
+ }
+}
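
A matching go.d job configuration for the schema above might look like this (update_every and timeout are the schema defaults; the second include pattern is purely illustrative):

    jobs:
      - name: systemdunits
        update_every: 10
        timeout: 2
        include:
          - '*.service'
          - '*.timer'
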
diff --git a/modules/systemdunits/systemdunits.go b/modules/systemdunits/systemdunits.go
index 3593b531e..ca04c3edb 100644
--- a/modules/systemdunits/systemdunits.go
+++ b/modules/systemdunits/systemdunits.go
@@ -7,6 +7,7 @@ package systemdunits
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -30,10 +31,10 @@ func init() {
func New() *SystemdUnits {
return &SystemdUnits{
Config: Config{
+ Timeout: web.Duration(time.Second * 2),
Include: []string{
"*.service",
},
- Timeout: web.Duration{Duration: time.Second * 2},
},
charts: &module.Charts{},
@@ -43,13 +44,14 @@ func New() *SystemdUnits {
}
type Config struct {
- Include []string `yaml:"include"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Include []string `yaml:"include" json:"include"`
}
type SystemdUnits struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
client systemdClient
conn systemdConnection
@@ -61,27 +63,40 @@ type SystemdUnits struct {
charts *module.Charts
}
-func (s *SystemdUnits) Init() bool {
+func (s *SystemdUnits) Configuration() any {
+ return s.Config
+}
+
+func (s *SystemdUnits) Init() error {
err := s.validateConfig()
if err != nil {
s.Errorf("config validation: %v", err)
- return false
+ return err
}
sr, err := s.initSelector()
if err != nil {
s.Errorf("init selector: %v", err)
- return false
+ return err
}
s.sr = sr
s.Debugf("unit names patterns: %v", s.Include)
s.Debugf("timeout: %s", s.Timeout)
- return true
+
+ return nil
}
-func (s *SystemdUnits) Check() bool {
- return len(s.Collect()) > 0
+func (s *SystemdUnits) Check() error {
+ mx, err := s.collect()
+ if err != nil {
+ s.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (s *SystemdUnits) Charts() *module.Charts {
@@ -89,15 +104,15 @@ func (s *SystemdUnits) Charts() *module.Charts {
}
func (s *SystemdUnits) Collect() map[string]int64 {
- ms, err := s.collect()
+ mx, err := s.collect()
if err != nil {
s.Error(err)
}
- if len(ms) == 0 {
+ if len(mx) == 0 {
return nil
}
- return ms
+ return mx
}
func (s *SystemdUnits) Cleanup() {
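The `Init`/`Check` signature changes above repeat for every module in this patch: the `module.Module` contract has evidently moved from boolean success flags to `error` returns, and gained a `Configuration() any` accessor used by the new serialization tests. A hedged sketch of the implied interface (the authoritative definition lives in `agent/module` and is not part of this diff):

```go
package module

// Charts elided; see agent/module for the real definition.
type Charts struct{}

// Module is the contract each collector implements after this patch,
// reconstructed from how SystemdUnits, Tengine, Traefik, Unbound,
// Upsd, and VCSA satisfy it below. Anything beyond these methods is
// a guess.
type Module interface {
	Init() error  // was Init() bool
	Check() error // was Check() bool
	Charts() *Charts
	Collect() map[string]int64
	Cleanup()
	Configuration() any // new: exposes the job config for (de)serialization
}
```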
diff --git a/modules/systemdunits/systemdunits_test.go b/modules/systemdunits/systemdunits_test.go
index baa9ed46a..6056a2f8b 100644
--- a/modules/systemdunits/systemdunits_test.go
+++ b/modules/systemdunits/systemdunits_test.go
@@ -9,6 +9,7 @@ import (
"context"
"errors"
"fmt"
+ "os"
"path/filepath"
"testing"
@@ -19,8 +20,22 @@ import (
"github.com/stretchr/testify/require"
)
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestSystemdUnits_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &SystemdUnits{}, dataConfigJSON, dataConfigYAML)
}
func TestSystemdUnits_Init(t *testing.T) {
@@ -48,9 +63,9 @@ func TestSystemdUnits_Init(t *testing.T) {
systemd.Config = test.config
if test.wantFail {
- assert.False(t, systemd.Init())
+ assert.Error(t, systemd.Init())
} else {
- assert.True(t, systemd.Init())
+ assert.NoError(t, systemd.Init())
}
})
}
@@ -115,12 +130,12 @@ func TestSystemdUnits_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
systemd := test.prepare()
- require.True(t, systemd.Init())
+ require.NoError(t, systemd.Init())
if test.wantFail {
- assert.False(t, systemd.Check())
+ assert.Error(t, systemd.Check())
} else {
- assert.True(t, systemd.Check())
+ assert.NoError(t, systemd.Check())
}
})
}
@@ -128,7 +143,7 @@ func TestSystemdUnits_Check(t *testing.T) {
func TestSystemdUnits_Charts(t *testing.T) {
systemd := New()
- require.True(t, systemd.Init())
+ require.NoError(t, systemd.Init())
assert.NotNil(t, systemd.Charts())
}
@@ -138,7 +153,7 @@ func TestSystemdUnits_Cleanup(t *testing.T) {
client := prepareOKClient(230)
systemd.client = client
- require.True(t, systemd.Init())
+ require.NoError(t, systemd.Init())
require.NotNil(t, systemd.Collect())
conn := systemd.conn
systemd.Cleanup()
@@ -681,7 +696,7 @@ func TestSystemdUnits_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
systemd := test.prepare()
- require.True(t, systemd.Init())
+ require.NoError(t, systemd.Init())
var collected map[string]int64
@@ -702,7 +717,7 @@ func TestSystemdUnits_connectionReuse(t *testing.T) {
systemd.Include = []string{"*"}
client := prepareOKClient(230)
systemd.client = client
- require.True(t, systemd.Init())
+ require.NoError(t, systemd.Init())
var collected map[string]int64
for i := 0; i < 10; i++ {
diff --git a/modules/systemdunits/testdata/config.json b/modules/systemdunits/testdata/config.json
new file mode 100644
index 000000000..ba8e51f1c
--- /dev/null
+++ b/modules/systemdunits/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "timeout": 123.123,
+ "include": [
+ "ok"
+ ]
+}
diff --git a/modules/systemdunits/testdata/config.yaml b/modules/systemdunits/testdata/config.yaml
new file mode 100644
index 000000000..377e4145d
--- /dev/null
+++ b/modules/systemdunits/testdata/config.yaml
@@ -0,0 +1,4 @@
+update_every: 123
+timeout: 123.123
+include:
+ - "ok"
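The `testdata/config.json`/`config.yaml` pairs added above (and for every other module below) feed `module.TestConfigurationSerialize`. Its implementation is not shown in this diff; a plausible reconstruction, assuming the `Module` interface sketched earlier and the yaml/json struct tags added to each `Config`:

```go
package module

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v2"
)

// TestConfigurationSerialize is a guess at the helper's behavior: both
// testdata encodings must decode into the module (which embeds its
// Config with yaml/json tags) and must agree on the resulting config.
// The real helper in agent/module may assert more than this.
func TestConfigurationSerialize(t *testing.T, mod Module, cfgJSON, cfgYAML []byte) {
	t.Helper()

	require.NoError(t, yaml.Unmarshal(cfgYAML, mod))
	wantFromYAML := mod.Configuration()

	require.NoError(t, json.Unmarshal(cfgJSON, mod))
	assert.Equal(t, wantFromYAML, mod.Configuration())
}
```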
diff --git a/modules/tengine/config_schema.json b/modules/tengine/config_schema.json
index 30958bb1b..e64d9dec5 100644
--- a/modules/tengine/config_schema.json
+++ b/modules/tengine/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/tengine job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Tengine collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Tengine status page to monitor.",
+ "type": "string",
+ "default": "http://127.0.0.1/us"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/tengine/tengine.go b/modules/tengine/tengine.go
index 169b390ab..48f66675a 100644
--- a/modules/tengine/tengine.go
+++ b/modules/tengine/tengine.go
@@ -4,11 +4,11 @@ package tengine
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -21,73 +21,76 @@ func init() {
})
}
-const (
- defaultURL = "http://127.0.0.1/us"
- defaultHTTPTimeout = time.Second * 2
-)
-
-// New creates Tengine with default values.
func New() *Tengine {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: defaultURL,
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: defaultHTTPTimeout},
+ return &Tengine{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1/us",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 2),
+ },
},
},
+ charts: charts.Copy(),
}
- return &Tengine{Config: config}
}
-// Config is the Tengine module configuration.
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
-// Tengine Tengine module.
type Tengine struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
apiClient *apiClient
}
-// Cleanup makes cleanup.
-func (Tengine) Cleanup() {}
+func (t *Tengine) Configuration() any {
+ return t.Config
+}
-// Init makes initialization.
-func (t *Tengine) Init() bool {
+func (t *Tengine) Init() error {
if t.URL == "" {
- t.Error("URL not set")
- return false
+ t.Error("url not set")
+ return errors.New("url not set")
}
client, err := web.NewHTTPClient(t.Client)
if err != nil {
t.Errorf("error on creating http client : %v", err)
- return false
+ return err
}
t.apiClient = newAPIClient(client, t.Request)
t.Debugf("using URL: %s", t.URL)
- t.Debugf("using timeout: %s", t.Timeout.Duration)
- return true
+ t.Debugf("using timeout: %s", t.Timeout)
+
+ return nil
}
-// Check makes check
-func (t *Tengine) Check() bool {
- return len(t.Collect()) > 0
+func (t *Tengine) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts returns Charts.
-func (t Tengine) Charts() *module.Charts {
- return charts.Copy()
+func (t *Tengine) Charts() *module.Charts {
+ return t.charts
}
-// Collect collects metrics.
func (t *Tengine) Collect() map[string]int64 {
mx, err := t.collect()
@@ -98,3 +101,9 @@ func (t *Tengine) Collect() map[string]int64 {
return mx
}
+
+func (t *Tengine) Cleanup() {
+ if t.apiClient != nil && t.apiClient.httpClient != nil {
+ t.apiClient.httpClient.CloseIdleConnections()
+ }
+}
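Tengine's `Cleanup` was previously a no-op; it now releases idle HTTP connections, and `Charts` returns a per-instance copy built once in `New` instead of copying the package-level template on every call. For orientation, a sketch of the lifecycle a caller presumably drives against any module after this patch (order inferred from the tests below, not from agent code in this diff):

```go
package driver

import "github.com/netdata/go.d.plugin/agent/module"

// runOnce is a hypothetical driver; the real orchestrator in agent/
// additionally handles scheduling, retries, and chart registration.
func runOnce(m module.Module) (map[string]int64, error) {
	if err := m.Init(); err != nil {
		return nil, err // config validation or client setup failed
	}
	// Cleanup now does real work: tengine closes idle HTTP connections,
	// unbound and upsd disconnect their sockets.
	defer m.Cleanup()

	if err := m.Check(); err != nil {
		return nil, err // first probe failed or produced no metrics
	}
	return m.Collect(), nil
}
```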
diff --git a/modules/tengine/tengine_test.go b/modules/tengine/tengine_test.go
index 04fe5f9e7..1be5c2002 100644
--- a/modules/tengine/tengine_test.go
+++ b/modules/tengine/tengine_test.go
@@ -9,28 +9,40 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testStatusData, _ = os.ReadFile("testdata/status.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataStatusMetrics, _ = os.ReadFile("testdata/status.txt")
)
-func TestTengine_Cleanup(t *testing.T) { New().Cleanup() }
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataStatusMetrics": dataStatusMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
-func TestNew(t *testing.T) {
- job := New()
+func TestTengine_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Tengine{}, dataConfigJSON, dataConfigYAML)
+}
- assert.Implements(t, (*module.Module)(nil), job)
- assert.Equal(t, defaultURL, job.URL)
- assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration)
+func TestTengine_Cleanup(t *testing.T) {
+ New().Cleanup()
}
func TestTengine_Init(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
assert.NotNil(t, job.apiClient)
}
@@ -38,22 +50,22 @@ func TestTengine_Check(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.True(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.NoError(t, job.Check())
}
func TestTengine_CheckNG(t *testing.T) {
job := New()
job.URL = "http://127.0.0.1:38001/us"
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestTengine_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) }
@@ -62,14 +74,14 @@ func TestTengine_Collect(t *testing.T) {
ts := httptest.NewServer(
http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(testStatusData)
+ _, _ = w.Write(dataStatusMetrics)
}))
defer ts.Close()
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- require.True(t, job.Check())
+ require.NoError(t, job.Init())
+ require.NoError(t, job.Check())
expected := map[string]int64{
"bytes_in": 5944,
@@ -116,8 +128,8 @@ func TestTengine_InvalidData(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
func TestTengine_404(t *testing.T) {
@@ -130,6 +142,6 @@ func TestTengine_404(t *testing.T) {
job := New()
job.URL = ts.URL
- require.True(t, job.Init())
- assert.False(t, job.Check())
+ require.NoError(t, job.Init())
+ assert.Error(t, job.Check())
}
diff --git a/modules/tengine/testdata/config.json b/modules/tengine/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/tengine/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/tengine/testdata/config.yaml b/modules/tengine/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/tengine/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
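Every `config_schema.json` in this patch is restructured the same way: the former top-level draft-07 schema moves under a `jsonSchema` key, and presentation hints (tab layout, password widgets, timeout help text) move under a sibling `uiSchema` key. A small self-contained check of that envelope, with the key names taken from the files above (whatever consumes them on the dashboard side is not visible here):

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// validateSchemaEnvelope asserts a config schema follows the new
// two-key layout: a draft-07 schema under "jsonSchema" plus display
// hints under "uiSchema".
func validateSchemaEnvelope(raw []byte) error {
	var envelope struct {
		JSONSchema json.RawMessage `json:"jsonSchema"`
		UISchema   json.RawMessage `json:"uiSchema"`
	}
	if err := json.Unmarshal(raw, &envelope); err != nil {
		return err
	}
	if len(envelope.JSONSchema) == 0 || len(envelope.UISchema) == 0 {
		return errors.New("schema must contain both jsonSchema and uiSchema")
	}
	return nil
}

func main() {
	raw := []byte(`{"jsonSchema":{"type":"object"},"uiSchema":{}}`)
	fmt.Println(validateSchemaEnvelope(raw)) // <nil>
}
```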
diff --git a/modules/traefik/config_schema.json b/modules/traefik/config_schema.json
index 0596ef83b..d0da95731 100644
--- a/modules/traefik/config_schema.json
+++ b/modules/traefik/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/traefik job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Traefik collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Traefik metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8082/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/traefik/testdata/config.json b/modules/traefik/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/traefik/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/traefik/testdata/config.yaml b/modules/traefik/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/traefik/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
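The paired testdata files deliberately spell booleans differently, `true` in JSON and `yes` in YAML, so the serialization test also pins down that the YAML decoder applies YAML 1.1 boolean rules. A quick standalone check, assuming `gopkg.in/yaml.v2` (a YAML 1.2 decoder such as yaml.v3 may treat `yes` as a string instead):

```go
package main

import (
	"encoding/json"
	"fmt"

	"gopkg.in/yaml.v2"
)

type cfg struct {
	NotFollowRedirects bool `yaml:"not_follow_redirects" json:"not_follow_redirects"`
}

func main() {
	var fromJSON, fromYAML cfg
	_ = json.Unmarshal([]byte(`{"not_follow_redirects": true}`), &fromJSON)
	_ = yaml.Unmarshal([]byte("not_follow_redirects: yes"), &fromYAML)
	// Both decode to true under YAML 1.1 boolean rules.
	fmt.Println(fromJSON == fromYAML) // true
}
```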
diff --git a/modules/traefik/traefik.go b/modules/traefik/traefik.go
index a121b0236..d671c15d9 100644
--- a/modules/traefik/traefik.go
+++ b/modules/traefik/traefik.go
@@ -4,6 +4,7 @@ package traefik
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -29,7 +30,7 @@ func New() *Traefik {
URL: "http://127.0.0.1:8082/metrics",
},
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ Timeout: web.Duration(time.Second),
},
},
},
@@ -43,16 +44,19 @@ func New() *Traefik {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type (
Traefik struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
+
+ prom prometheus.Prometheus
- prom prometheus.Prometheus
- charts *module.Charts
checkMetrics bool
cache *cache
}
@@ -73,24 +77,36 @@ type (
}
)
-func (t *Traefik) Init() bool {
+func (t *Traefik) Configuration() any {
+ return t.Config
+}
+
+func (t *Traefik) Init() error {
if err := t.validateConfig(); err != nil {
t.Errorf("config validation: %v", err)
- return false
+ return err
}
prom, err := t.initPrometheusClient()
if err != nil {
t.Errorf("prometheus client initialization: %v", err)
- return false
+ return err
}
t.prom = prom
- return true
+ return nil
}
-func (t *Traefik) Check() bool {
- return len(t.Collect()) > 0
+func (t *Traefik) Check() error {
+ mx, err := t.collect()
+ if err != nil {
+ t.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (t *Traefik) Charts() *module.Charts {
diff --git a/modules/traefik/traefik_test.go b/modules/traefik/traefik_test.go
index c5804b672..a854fd704 100644
--- a/modules/traefik/traefik_test.go
+++ b/modules/traefik/traefik_test.go
@@ -8,6 +8,7 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
@@ -16,19 +17,24 @@ import (
)
var (
- v221Metrics, _ = os.ReadFile("testdata/v2.2.1/metrics.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer221Metrics, _ = os.ReadFile("testdata/v2.2.1/metrics.txt")
)
-func Test_Testdata(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v2.2.1_Metrics": v221Metrics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer221Metrics": dataVer221Metrics,
} {
- require.NotNilf(t, data, name)
+ require.NotNil(t, data, name)
}
}
-func TestNew(t *testing.T) {
- assert.IsType(t, (*Traefik)(nil), New())
+func TestTraefik_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Traefik{}, dataConfigJSON, dataConfigYAML)
}
func TestTraefik_Init(t *testing.T) {
@@ -62,9 +68,9 @@ func TestTraefik_Init(t *testing.T) {
rdb.Config = test.config
if test.wantFail {
- assert.False(t, rdb.Init())
+ assert.Error(t, rdb.Init())
} else {
- assert.True(t, rdb.Init())
+ assert.NoError(t, rdb.Init())
}
})
}
@@ -107,9 +113,9 @@ func TestTraefik_Check(t *testing.T) {
defer cleanup()
if test.wantFail {
- assert.False(t, tk.Check())
+ assert.Error(t, tk.Check())
} else {
- assert.True(t, tk.Check())
+ assert.NoError(t, tk.Check())
}
})
}
@@ -251,11 +257,11 @@ func prepareCaseTraefikV221Metrics(t *testing.T) (*Traefik, func()) {
t.Helper()
srv := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(v221Metrics)
+ _, _ = w.Write(dataVer221Metrics)
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -292,7 +298,7 @@ traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",me
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -320,7 +326,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"}
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -333,7 +339,7 @@ func prepareCase404Response(t *testing.T) (*Traefik, func()) {
}))
h := New()
h.URL = srv.URL
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, srv.Close
}
@@ -342,7 +348,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) {
t.Helper()
h := New()
h.URL = "http://127.0.0.1:38001"
- require.True(t, h.Init())
+ require.NoError(t, h.Init())
return h, func() {}
}
diff --git a/modules/unbound/config_schema.json b/modules/unbound/config_schema.json
index 290905ac0..f85e2c3c6 100644
--- a/modules/unbound/config_schema.json
+++ b/modules/unbound/config_schema.json
@@ -1,44 +1,106 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/unbound job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Unbound collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The frequency, in seconds, at which data is collected from the Unbound server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Unbound server listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:8953"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout duration, in seconds, for connection, read, write, and SSL handshake operations.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "conf_path": {
+ "title": "Path to unbound.conf",
+ "description": "The absolute path to the Unbound configuration file. Providing this path enables the tool to make adjustments based on the 'remote-control' section.",
+ "type": "string",
+ "default": "/etc/unbound/unbound.conf"
+ },
+ "cumulative_stats": {
+ "title": "Cumulative stats",
+ "description": "Specifies whether statistics collection mode is enabled. Should match the 'statistics-cumulative' parameter in unbound.conf.",
+ "type": "boolean",
+ "default": false
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Indicates whether TLS should be used for secure communication.",
+ "type": "boolean",
+ "default": true
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean",
+ "default": true
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string",
+ "default": "/etc/unbound/unbound_control.pem"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string",
+ "default": "/etc/unbound/unbound_control.key"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "conf_path": {
- "type": "string"
- },
- "cumulative_stats": {
- "type": "boolean"
- },
- "use_tls": {
- "type": "boolean"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "tls_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout",
+ "conf_path",
+ "cumulative_stats"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "use_tls",
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/unbound/init.go b/modules/unbound/init.go
index 6ae9543f3..bca49f027 100644
--- a/modules/unbound/init.go
+++ b/modules/unbound/init.go
@@ -87,9 +87,9 @@ func (u *Unbound) initClient() (err error) {
u.client = socket.New(socket.Config{
Address: u.Address,
- ConnectTimeout: u.Timeout.Duration,
- ReadTimeout: u.Timeout.Duration,
- WriteTimeout: u.Timeout.Duration,
+ ConnectTimeout: u.Timeout.Duration(),
+ ReadTimeout: u.Timeout.Duration(),
+ WriteTimeout: u.Timeout.Duration(),
TLSConf: tlsCfg,
})
return nil
diff --git a/modules/unbound/metadata.yaml b/modules/unbound/metadata.yaml
index 3e42aecfc..ec6e6538d 100644
--- a/modules/unbound/metadata.yaml
+++ b/modules/unbound/metadata.yaml
@@ -94,7 +94,7 @@ modules:
required: false
- name: cumulative_stats
description: Statistics collection mode. Should have the same value as the `statistics-cumulative` parameter in the unbound configuration file.
- default_value: /etc/unbound/unbound.conf
+ default_value: false
required: false
- name: use_tls
description: Whether to use TLS or not.
diff --git a/modules/unbound/testdata/config.json b/modules/unbound/testdata/config.json
new file mode 100644
index 000000000..9874de180
--- /dev/null
+++ b/modules/unbound/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "conf_path": "ok",
+ "timeout": 123.123,
+ "cumulative_stats": true,
+ "use_tls": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/unbound/testdata/config.yaml b/modules/unbound/testdata/config.yaml
new file mode 100644
index 000000000..68326cabc
--- /dev/null
+++ b/modules/unbound/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+address: "ok"
+conf_path: "ok"
+timeout: 123.123
+cumulative_stats: yes
+use_tls: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/unbound/unbound.go b/modules/unbound/unbound.go
index 625ef75cd..066499c09 100644
--- a/modules/unbound/unbound.go
+++ b/modules/unbound/unbound.go
@@ -4,13 +4,13 @@ package unbound
import (
_ "embed"
+ "errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/socket"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,60 +24,60 @@ func init() {
}
func New() *Unbound {
- config := Config{
- Address: "127.0.0.1:8953",
- ConfPath: "/etc/unbound/unbound.conf",
- Timeout: web.Duration{Duration: time.Second},
- Cumulative: false,
- UseTLS: true,
- TLSConfig: tlscfg.TLSConfig{
- TLSCert: "/etc/unbound/unbound_control.pem",
- TLSKey: "/etc/unbound/unbound_control.key",
- InsecureSkipVerify: true,
- },
- }
-
return &Unbound{
- Config: config,
+ Config: Config{
+ Address: "127.0.0.1:8953",
+ ConfPath: "/etc/unbound/unbound.conf",
+ Timeout: web.Duration(time.Second),
+ Cumulative: false,
+ UseTLS: true,
+ TLSConfig: tlscfg.TLSConfig{
+ TLSCert: "/etc/unbound/unbound_control.pem",
+ TLSKey: "/etc/unbound/unbound_control.key",
+ InsecureSkipVerify: true,
+ },
+ },
curCache: newCollectCache(),
cache: newCollectCache(),
}
}
-type (
- Config struct {
- Address string `yaml:"address"`
- ConfPath string `yaml:"conf_path"`
- Timeout web.Duration `yaml:"timeout"`
- Cumulative bool `yaml:"cumulative_stats"`
- UseTLS bool `yaml:"use_tls"`
- tlscfg.TLSConfig `yaml:",inline"`
- }
- Unbound struct {
- module.Base
- Config `yaml:",inline"`
+type Config struct {
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ ConfPath string `yaml:"conf_path" json:"conf_path"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ Cumulative bool `yaml:"cumulative_stats" json:"cumulative_stats"`
+ UseTLS bool `yaml:"use_tls" json:"use_tls"`
+}
- client socket.Client
- cache collectCache
- curCache collectCache
+type Unbound struct {
+ module.Base
+ Config `yaml:",inline" json:""`
- prevCacheMiss float64 // needed for cumulative mode
- extChartsCreated bool
+ charts *module.Charts
- charts *module.Charts
- }
-)
+ client socket.Client
+
+ cache collectCache
+ curCache collectCache
+ prevCacheMiss float64 // needed for cumulative mode
+ extChartsCreated bool
+}
-func (Unbound) Cleanup() {}
+func (u *Unbound) Configuration() any {
+ return u.Config
+}
-func (u *Unbound) Init() bool {
+func (u *Unbound) Init() error {
if enabled := u.initConfig(); !enabled {
- return false
+ return errors.New("remote control is disabled in the configuration file")
}
if err := u.initClient(); err != nil {
u.Errorf("creating client: %v", err)
- return false
+ return err
}
u.charts = charts(u.Cumulative)
@@ -86,14 +86,23 @@ func (u *Unbound) Init() bool {
if u.UseTLS {
u.Debugf("using tls_skip_verify: %v, tls_key: %s, tls_cert: %s", u.InsecureSkipVerify, u.TLSKey, u.TLSCert)
}
- return true
+
+ return nil
}
-func (u *Unbound) Check() bool {
- return len(u.Collect()) > 0
+func (u *Unbound) Check() error {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-func (u Unbound) Charts() *module.Charts {
+func (u *Unbound) Charts() *module.Charts {
return u.charts
}
@@ -108,3 +117,9 @@ func (u *Unbound) Collect() map[string]int64 {
}
return mx
}
+
+func (u *Unbound) Cleanup() {
+ if u.client != nil {
+ _ = u.client.Disconnect()
+ }
+}
diff --git a/modules/unbound/unbound_test.go b/modules/unbound/unbound_test.go
index fabea299d..2ada6d689 100644
--- a/modules/unbound/unbound_test.go
+++ b/modules/unbound/unbound_test.go
@@ -11,51 +11,53 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/socket"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- commonStatsData, _ = os.ReadFile("testdata/stats/common.txt")
- extStatsData, _ = os.ReadFile("testdata/stats/extended.txt")
- lifeCycleCumulativeData1, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended1.txt")
- lifeCycleCumulativeData2, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended2.txt")
- lifeCycleCumulativeData3, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended3.txt")
- lifeCycleResetData1, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended1.txt")
- lifeCycleResetData2, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended2.txt")
- lifeCycleResetData3, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended3.txt")
-)
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, commonStatsData)
- assert.NotNil(t, extStatsData)
- assert.NotNil(t, lifeCycleCumulativeData1)
- assert.NotNil(t, lifeCycleCumulativeData2)
- assert.NotNil(t, lifeCycleCumulativeData3)
- assert.NotNil(t, lifeCycleResetData1)
- assert.NotNil(t, lifeCycleResetData2)
- assert.NotNil(t, lifeCycleResetData3)
-}
+ dataCommonStats, _ = os.ReadFile("testdata/stats/common.txt")
+ dataExtendedStats, _ = os.ReadFile("testdata/stats/extended.txt")
+ dataLifeCycleCumulative1, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended1.txt")
+ dataLifeCycleCumulative2, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended2.txt")
+ dataLifeCycleCumulative3, _ = os.ReadFile("testdata/stats/lifecycle/cumulative/extended3.txt")
+ dataLifeCycleReset1, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended1.txt")
+ dataLifeCycleReset2, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended2.txt")
+ dataLifeCycleReset3, _ = os.ReadFile("testdata/stats/lifecycle/reset/extended3.txt")
+)
-func nonTLSUnbound() *Unbound {
- unbound := New()
- unbound.ConfPath = ""
- unbound.UseTLS = false
- return unbound
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataCommonStats": dataCommonStats,
+ "dataExtendedStats": dataExtendedStats,
+ "dataLifeCycleCumulative1": dataLifeCycleCumulative1,
+ "dataLifeCycleCumulative2": dataLifeCycleCumulative2,
+ "dataLifeCycleCumulative3": dataLifeCycleCumulative3,
+ "dataLifeCycleReset1": dataLifeCycleReset1,
+ "dataLifeCycleReset2": dataLifeCycleReset2,
+ "dataLifeCycleReset3": dataLifeCycleReset3,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestUnbound_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Unbound{}, dataConfigJSON, dataConfigYAML)
}
func TestUnbound_Init(t *testing.T) {
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
- assert.True(t, unbound.Init())
+ assert.NoError(t, unbound.Init())
}
func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) {
@@ -74,45 +76,45 @@ func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) {
},
}
- assert.True(t, unbound.Init())
+ assert.NoError(t, unbound.Init())
assert.Equal(t, expectedConfig, unbound.Config)
}
func TestUnbound_Init_DisabledInUnboundConf(t *testing.T) {
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
unbound.ConfPath = "testdata/unbound_disabled.conf"
- assert.False(t, unbound.Init())
+ assert.Error(t, unbound.Init())
}
func TestUnbound_Init_HandleEmptyConfig(t *testing.T) {
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
unbound.ConfPath = "testdata/unbound_empty.conf"
- assert.True(t, unbound.Init())
+ assert.NoError(t, unbound.Init())
}
func TestUnbound_Init_HandleNonExistentConfig(t *testing.T) {
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
unbound.ConfPath = "testdata/unbound_non_existent.conf"
- assert.True(t, unbound.Init())
+ assert.NoError(t, unbound.Init())
}
func TestUnbound_Check(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
- unbound.client = mockUnboundClient{data: commonStatsData, err: false}
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataCommonStats, err: false}
- assert.True(t, unbound.Check())
+ assert.NoError(t, unbound.Check())
}
func TestUnbound_Check_ErrorDuringScrapingUnbound(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
unbound.client = mockUnboundClient{err: true}
- assert.False(t, unbound.Check())
+ assert.Error(t, unbound.Check())
}
func TestUnbound_Cleanup(t *testing.T) {
@@ -120,16 +122,16 @@ func TestUnbound_Cleanup(t *testing.T) {
}
func TestUnbound_Charts(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
assert.NotNil(t, unbound.Charts())
}
func TestUnbound_Collect(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
- unbound.client = mockUnboundClient{data: commonStatsData, err: false}
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataCommonStats, err: false}
collected := unbound.Collect()
assert.Equal(t, expectedCommon, collected)
@@ -137,9 +139,9 @@ func TestUnbound_Collect(t *testing.T) {
}
func TestUnbound_Collect_ExtendedStats(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
- unbound.client = mockUnboundClient{data: extStatsData, err: false}
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
+ unbound.client = mockUnboundClient{data: dataExtendedStats, err: false}
collected := unbound.Collect()
assert.Equal(t, expectedExtended, collected)
@@ -151,14 +153,14 @@ func TestUnbound_Collect_LifeCycleCumulativeExtendedStats(t *testing.T) {
input []byte
expected map[string]int64
}{
- {input: lifeCycleCumulativeData1, expected: expectedCumulative1},
- {input: lifeCycleCumulativeData2, expected: expectedCumulative2},
- {input: lifeCycleCumulativeData3, expected: expectedCumulative3},
+ {input: dataLifeCycleCumulative1, expected: expectedCumulative1},
+ {input: dataLifeCycleCumulative2, expected: expectedCumulative2},
+ {input: dataLifeCycleCumulative3, expected: expectedCumulative3},
}
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
unbound.Cumulative = true
- require.True(t, unbound.Init())
+ require.NoError(t, unbound.Init())
ubClient := &mockUnboundClient{err: false}
unbound.client = ubClient
@@ -179,14 +181,14 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) {
input []byte
expected map[string]int64
}{
- {input: lifeCycleResetData1, expected: expectedReset1},
- {input: lifeCycleResetData2, expected: expectedReset2},
- {input: lifeCycleResetData3, expected: expectedReset3},
+ {input: dataLifeCycleReset1, expected: expectedReset1},
+ {input: dataLifeCycleReset2, expected: expectedReset2},
+ {input: dataLifeCycleReset3, expected: expectedReset3},
}
- unbound := nonTLSUnbound()
+ unbound := prepareNonTLSUnbound()
unbound.Cumulative = false
- require.True(t, unbound.Init())
+ require.NoError(t, unbound.Init())
ubClient := &mockUnboundClient{err: false}
unbound.client = ubClient
@@ -203,38 +205,46 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) {
}
func TestUnbound_Collect_EmptyResponse(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
unbound.client = mockUnboundClient{data: []byte{}, err: false}
assert.Nil(t, unbound.Collect())
}
func TestUnbound_Collect_ErrorResponse(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
unbound.client = mockUnboundClient{data: []byte("error unknown command 'unknown'"), err: false}
assert.Nil(t, unbound.Collect())
}
func TestUnbound_Collect_ErrorOnSend(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
unbound.client = mockUnboundClient{err: true}
assert.Nil(t, unbound.Collect())
}
func TestUnbound_Collect_ErrorOnParseBadSyntax(t *testing.T) {
- unbound := nonTLSUnbound()
- require.True(t, unbound.Init())
+ unbound := prepareNonTLSUnbound()
+ require.NoError(t, unbound.Init())
data := strings.Repeat("zk_avg_latency 0\nzk_min_latency 0\nzk_mix_latency 0\n", 10)
unbound.client = mockUnboundClient{data: []byte(data), err: false}
assert.Nil(t, unbound.Collect())
}
+func prepareNonTLSUnbound() *Unbound {
+ unbound := New()
+ unbound.ConfPath = ""
+ unbound.UseTLS = false
+
+ return unbound
+}
+
type mockUnboundClient struct {
data []byte
err bool
diff --git a/modules/upsd/client.go b/modules/upsd/client.go
index be0148bc5..cf67acdf6 100644
--- a/modules/upsd/client.go
+++ b/modules/upsd/client.go
@@ -29,9 +29,9 @@ type upsUnit struct {
func newUpsdConn(conf Config) upsdConn {
return &upsdClient{conn: socket.New(socket.Config{
- ConnectTimeout: conf.Timeout.Duration,
- ReadTimeout: conf.Timeout.Duration,
- WriteTimeout: conf.Timeout.Duration,
+ ConnectTimeout: conf.Timeout.Duration(),
+ ReadTimeout: conf.Timeout.Duration(),
+ WriteTimeout: conf.Timeout.Duration(),
Address: conf.Address,
})}
}
diff --git a/modules/upsd/config_schema.json b/modules/upsd/config_schema.json
index 49fc85354..20374fc2f 100644
--- a/modules/upsd/config_schema.json
+++ b/modules/upsd/config_schema.json
@@ -1,29 +1,81 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/upsd job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "UPSd collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the UPSd daemon listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:3493"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "Connection, read, and write timeout duration in seconds.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for authentication (if required).",
+ "type": "string"
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for authentication (if required).",
+ "type": "string"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ],
+ "dependencies": {
+ "username": [
+ "password"
+ ],
+ "password": [
+ "username"
+ ]
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "username": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
"password": {
- "type": "string"
+ "ui:widget": "password"
},
- "timeout": {
- "type": [
- "string",
- "integer"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ }
]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
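Unlike the other schemas in this patch, upsd's adds a `dependencies` block that makes `username` and `password` mutually required: supplying one without the other fails validation. A hedged sketch of the equivalent runtime rule (the module's own `Init`, in the hunk below, only requires `address`):

```go
package upsd

import "errors"

// validateAuth mirrors, at runtime, the invariant the schema's
// "dependencies" keyword encodes: credentials are all-or-nothing.
// Hypothetical helper, not part of this patch.
func validateAuth(username, password string) error {
	if (username == "") != (password == "") {
		return errors.New("username and password must be set together")
	}
	return nil
}
```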
diff --git a/modules/upsd/testdata/config.json b/modules/upsd/testdata/config.json
new file mode 100644
index 000000000..ab7a8654c
--- /dev/null
+++ b/modules/upsd/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "username": "ok",
+ "password": "ok",
+ "timeout": 123.123
+}
diff --git a/modules/upsd/testdata/config.yaml b/modules/upsd/testdata/config.yaml
new file mode 100644
index 000000000..c29848583
--- /dev/null
+++ b/modules/upsd/testdata/config.yaml
@@ -0,0 +1,6 @@
+update_every: 123
+address: "ok"
+username: "ok"
+password: "ok"
+timeout: 123.123
+
diff --git a/modules/upsd/upsd.go b/modules/upsd/upsd.go
index ebe0f36bc..f27154f46 100644
--- a/modules/upsd/upsd.go
+++ b/modules/upsd/upsd.go
@@ -3,15 +3,21 @@
package upsd
import (
+ _ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
)
+//go:embed "config_schema.json"
+var configSchema string
+
func init() {
module.Register("upsd", module.Creator{
- Create: func() module.Module { return New() },
+ JobConfigSchema: configSchema,
+ Create: func() module.Module { return New() },
})
}
@@ -19,7 +25,7 @@ func New() *Upsd {
return &Upsd{
Config: Config{
Address: "127.0.0.1:3493",
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
},
newUpsdConn: newUpsdConn,
charts: &module.Charts{},
@@ -28,22 +34,22 @@ func New() *Upsd {
}
type Config struct {
- Address string `yaml:"address"`
- Username string `yaml:"username"`
- Password string `yaml:"password"`
- Timeout web.Duration `yaml:"timeout"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Username string `yaml:"username" json:"username"`
+ Password string `yaml:"password" json:"password"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
}
type (
Upsd struct {
module.Base
-
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- newUpsdConn func(Config) upsdConn
conn upsdConn
+ newUpsdConn func(Config) upsdConn
upsUnits map[string]bool
}
@@ -56,17 +62,29 @@ type (
}
)
-func (u *Upsd) Init() bool {
+func (u *Upsd) Configuration() any {
+ return u.Config
+}
+
+func (u *Upsd) Init() error {
if u.Address == "" {
u.Error("config: 'address' not set")
- return false
+ return errors.New("address not set")
}
- return true
+ return nil
}
-func (u *Upsd) Check() bool {
- return len(u.Collect()) > 0
+func (u *Upsd) Check() error {
+ mx, err := u.collect()
+ if err != nil {
+ u.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (u *Upsd) Charts() *module.Charts {
diff --git a/modules/upsd/upsd_test.go b/modules/upsd/upsd_test.go
index 74c8626f1..5d446bd10 100644
--- a/modules/upsd/upsd_test.go
+++ b/modules/upsd/upsd_test.go
@@ -5,12 +5,33 @@ package upsd
import (
"errors"
"fmt"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestUpsd_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Upsd{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestUpsd_Cleanup(t *testing.T) {
upsd := New()
@@ -19,7 +40,7 @@ func TestUpsd_Cleanup(t *testing.T) {
mock := prepareMockConnOK()
upsd.newUpsdConn = func(Config) upsdConn { return mock }
- require.True(t, upsd.Init())
+ require.NoError(t, upsd.Init())
_ = upsd.Collect()
require.NotPanics(t, upsd.Cleanup)
assert.True(t, mock.calledDisconnect)
@@ -46,9 +67,9 @@ func TestUpsd_Init(t *testing.T) {
upsd.Config = test.config
if test.wantFail {
- assert.False(t, upsd.Init())
+ assert.Error(t, upsd.Init())
} else {
- assert.True(t, upsd.Init())
+ assert.NoError(t, upsd.Init())
}
})
}
@@ -92,12 +113,12 @@ func TestUpsd_Check(t *testing.T) {
upsd := test.prepareUpsd()
upsd.newUpsdConn = func(Config) upsdConn { return test.prepareMock() }
- require.True(t, upsd.Init())
+ require.NoError(t, upsd.Init())
if test.wantFail {
- assert.False(t, upsd.Check())
+ assert.Error(t, upsd.Check())
} else {
- assert.True(t, upsd.Check())
+ assert.NoError(t, upsd.Check())
}
})
}
@@ -105,7 +126,7 @@ func TestUpsd_Check(t *testing.T) {
func TestUpsd_Charts(t *testing.T) {
upsd := New()
- require.True(t, upsd.Init())
+ require.NoError(t, upsd.Init())
assert.NotNil(t, upsd.Charts())
}
@@ -225,7 +246,7 @@ func TestUpsd_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
upsd := test.prepareUpsd()
- require.True(t, upsd.Init())
+ require.NoError(t, upsd.Init())
mock := test.prepareMock()
upsd.newUpsdConn = func(Config) upsdConn { return mock }
diff --git a/modules/vcsa/config_schema.json b/modules/vcsa/config_schema.json
index aab0647ab..80182867f 100644
--- a/modules/vcsa/config_schema.json
+++ b/modules/vcsa/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/vcsa job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "vCenter Server Appliance collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the VCSA server.",
+ "type": "string",
+ "default": ""
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/vcsa/testdata/config.json b/modules/vcsa/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/vcsa/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/vcsa/testdata/config.yaml b/modules/vcsa/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/vcsa/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/vcsa/vcsa.go b/modules/vcsa/vcsa.go
index ccac96f3a..5b1e56662 100644
--- a/modules/vcsa/vcsa.go
+++ b/modules/vcsa/vcsa.go
@@ -4,11 +4,11 @@ package vcsa
import (
_ "embed"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/web"
-
"github.com/netdata/go.d.plugin/agent/module"
+ "github.com/netdata/go.d.plugin/pkg/web"
)
//go:embed "config_schema.json"
@@ -29,7 +29,7 @@ func New() *VCSA {
Config: Config{
HTTP: web.HTTP{
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 5),
},
},
},
@@ -38,17 +38,18 @@ func New() *VCSA {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type (
VCSA struct {
module.Base
- Config `yaml:",inline"`
-
- client healthClient
+ Config `yaml:",inline" json:""`
charts *module.Charts
+
+ client healthClient
}
healthClient interface {
@@ -66,33 +67,47 @@ type (
}
)
-func (vc *VCSA) Init() bool {
+func (vc *VCSA) Configuration() any {
+ return vc.Config
+}
+
+func (vc *VCSA) Init() error {
if err := vc.validateConfig(); err != nil {
vc.Error(err)
- return false
+ return err
}
c, err := vc.initHealthClient()
if err != nil {
vc.Errorf("error on creating health client : %vc", err)
- return false
+ return err
}
vc.client = c
vc.Debugf("using URL %s", vc.URL)
- vc.Debugf("using timeout: %s", vc.Timeout.Duration)
+ vc.Debugf("using timeout: %s", vc.Timeout)
- return true
+ return nil
}
-func (vc *VCSA) Check() bool {
+func (vc *VCSA) Check() error {
err := vc.client.Login()
if err != nil {
vc.Error(err)
- return false
+ return err
+ }
+
+ mx, err := vc.collect()
+ if err != nil {
+ vc.Error(err)
+ return err
+ }
+
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
- return len(vc.Collect()) > 0
+ return nil
}
func (vc *VCSA) Charts() *module.Charts {
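
The collector lifecycle also changes shape across this diff: `Init` and `Check` return `error` instead of `bool`, and each module gains a `Configuration() any` accessor. A hedged sketch of the contract these signatures imply — the real interface is defined in `agent/module` and may differ in detail:

```go
package sketch

// Charts is a placeholder for module.Charts; only the method set matters here.
type Charts struct{}

// Module is the lifecycle contract inferred from the call sites in this diff.
type Module interface {
	Init() error               // was Init() bool: validation errors now propagate to the caller
	Check() error              // was Check() bool: e.g. "no metrics collected" is now an error
	Charts() *Charts           // unchanged
	Collect() map[string]int64 // unchanged
	Cleanup()                  // unchanged
	Configuration() any        // new: exposes the module's config for (de)serialization
}
```
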
diff --git a/modules/vcsa/vcsa_test.go b/modules/vcsa/vcsa_test.go
index 86185bfa2..fdf90b7d7 100644
--- a/modules/vcsa/vcsa_test.go
+++ b/modules/vcsa/vcsa_test.go
@@ -4,77 +4,84 @@ package vcsa
import (
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
-func testNewVCSA() *VCSA {
- vc := New()
- vc.URL = "https://127.0.0.1:38001"
- vc.Username = "user"
- vc.Password = "pass"
- return vc
-}
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
-func TestNew(t *testing.T) {
- job := New()
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.IsType(t, (*VCSA)(nil), job)
+func TestVCSA_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VCSA{}, dataConfigJSON, dataConfigYAML)
}
func TestVCSA_Init(t *testing.T) {
- job := testNewVCSA()
+ job := prepareVCSA()
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.client)
}
func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) {
job := New()
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestVCenter_InitErrorOnCreatingClient(t *testing.T) {
- job := testNewVCSA()
+ job := prepareVCSA()
job.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestVCenter_Check(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
job.client = &mockVCenterHealthClient{}
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
}
func TestVCenter_CheckErrorOnLogin(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
job.client = &mockVCenterHealthClient{
login: func() error { return errors.New("login mock error") },
}
- assert.False(t, job.Check())
+ assert.Error(t, job.Check())
}
func TestVCenter_CheckEnsureLoggedIn(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{}
job.client = mock
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
assert.True(t, mock.loginCalls == 1)
}
func TestVCenter_Cleanup(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{}
job.client = mock
job.Cleanup()
@@ -83,7 +90,7 @@ func TestVCenter_Cleanup(t *testing.T) {
}
func TestVCenter_CleanupWithNilClient(t *testing.T) {
- job := testNewVCSA()
+ job := prepareVCSA()
assert.NotPanics(t, job.Cleanup)
}
@@ -93,8 +100,8 @@ func TestVCenter_Charts(t *testing.T) {
}
func TestVCenter_Collect(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{}
job.client = mock
@@ -152,8 +159,8 @@ func TestVCenter_Collect(t *testing.T) {
}
func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{}
job.client = mock
job.Collect()
@@ -162,8 +169,8 @@ func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) {
}
func TestVCenter_CollectErrorOnPing(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{
ping: func() error { return errors.New("ping mock error") },
}
@@ -173,8 +180,8 @@ func TestVCenter_CollectErrorOnPing(t *testing.T) {
}
func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) {
- job := testNewVCSA()
- require.True(t, job.Init())
+ job := prepareVCSA()
+ require.NoError(t, job.Init())
mock := &mockVCenterHealthClient{
applMgmt: func() (string, error) { return "", errors.New("applMgmt mock error") },
databaseStorage: func() (string, error) { return "", errors.New("databaseStorage mock error") },
@@ -190,6 +197,15 @@ func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) {
assert.Zero(t, job.Collect())
}
+func prepareVCSA() *VCSA {
+ vc := New()
+ vc.URL = "https://127.0.0.1:38001"
+ vc.Username = "user"
+ vc.Password = "pass"
+
+ return vc
+}
+
type mockVCenterHealthClient struct {
login func() error
logout func() error
@@ -231,56 +247,56 @@ func (m *mockVCenterHealthClient) Ping() error {
return m.ping()
}
-func (m mockVCenterHealthClient) ApplMgmt() (string, error) {
+func (m *mockVCenterHealthClient) ApplMgmt() (string, error) {
if m.applMgmt == nil {
return "green", nil
}
return m.applMgmt()
}
-func (m mockVCenterHealthClient) DatabaseStorage() (string, error) {
+func (m *mockVCenterHealthClient) DatabaseStorage() (string, error) {
if m.databaseStorage == nil {
return "green", nil
}
return m.databaseStorage()
}
-func (m mockVCenterHealthClient) Load() (string, error) {
+func (m *mockVCenterHealthClient) Load() (string, error) {
if m.load == nil {
return "green", nil
}
return m.load()
}
-func (m mockVCenterHealthClient) Mem() (string, error) {
+func (m *mockVCenterHealthClient) Mem() (string, error) {
if m.mem == nil {
return "green", nil
}
return m.mem()
}
-func (m mockVCenterHealthClient) SoftwarePackages() (string, error) {
+func (m *mockVCenterHealthClient) SoftwarePackages() (string, error) {
if m.softwarePackages == nil {
return "green", nil
}
return m.softwarePackages()
}
-func (m mockVCenterHealthClient) Storage() (string, error) {
+func (m *mockVCenterHealthClient) Storage() (string, error) {
if m.storage == nil {
return "green", nil
}
return m.storage()
}
-func (m mockVCenterHealthClient) Swap() (string, error) {
+func (m *mockVCenterHealthClient) Swap() (string, error) {
if m.swap == nil {
return "green", nil
}
return m.swap()
}
-func (m mockVCenterHealthClient) System() (string, error) {
+func (m *mockVCenterHealthClient) System() (string, error) {
if m.system == nil {
return "green", nil
}
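
The rewritten tests all follow one pattern: load paired `testdata/config.{json,yaml}` fixtures, then call `module.TestConfigurationSerialize`. The helper itself is not shown in this diff; the sketch below is an assumption about what such a round-trip check does, based on its call sites:

```go
package sketch

import (
	"encoding/json"
	"testing"

	"github.com/stretchr/testify/require"
	"gopkg.in/yaml.v3"
)

// testConfigurationSerialize is a hypothetical stand-in for the new
// module.TestConfigurationSerialize helper: it feeds the JSON and YAML
// fixtures into the module and verifies the config round-trips intact.
func testConfigurationSerialize(t *testing.T, mod any, cfgJSON, cfgYAML []byte) {
	t.Helper()

	require.NoError(t, json.Unmarshal(cfgJSON, mod))
	gotJSON, err := json.Marshal(mod)
	require.NoError(t, err)
	require.JSONEq(t, string(cfgJSON), string(gotJSON))

	require.NoError(t, yaml.Unmarshal(cfgYAML, mod))
	gotYAML, err := yaml.Marshal(mod)
	require.NoError(t, err)
	require.YAMLEq(t, string(cfgYAML), string(gotYAML))
}
```
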
diff --git a/modules/vernemq/charts.go b/modules/vernemq/charts.go
index 54b86b9bd..7bd20570d 100644
--- a/modules/vernemq/charts.go
+++ b/modules/vernemq/charts.go
@@ -826,9 +826,10 @@ var (
)
func (v *VerneMQ) notifyNewScheduler(name string) {
- if v.cache.hasP(name) {
+ if v.cache[name] {
return
}
+ v.cache[name] = true
id := chartSchedulerUtilization.ID
num := name[len("system_utilization_scheduler_"):]
@@ -841,9 +842,10 @@ func (v *VerneMQ) notifyNewReason(name, reason string) {
return
}
key := join(name, reason)
- if v.cache.hasP(key) {
+ if v.cache[key] {
return
}
+ v.cache[key] = true
var chart Chart
switch name {
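
This hunk replaces the `cache` named type and its combined check-and-set helper `hasP` with a plain `map[string]bool` and an explicit guard-then-mark at each call site. Side by side, as a sketch:

```go
package sketch

// Before: a named map type whose helper both checks and marks in one call.
type cache map[string]bool

func (c cache) hasP(v string) bool { ok := c[v]; c[v] = true; return ok }

// After: a plain map with the guard and the mark spelled out at the call
// site, so the mutation is visible where it happens.
func notifyNew(seen map[string]bool, name string) {
	if seen[name] {
		return
	}
	seen[name] = true
	// ... add the chart for name ...
}
```
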
diff --git a/modules/vernemq/config_schema.json b/modules/vernemq/config_schema.json
index f21bab451..1383031a4 100644
--- a/modules/vernemq/config_schema.json
+++ b/modules/vernemq/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/vernemq job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "VerneMQ collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the VerneMQ metrics endpoint.",
+ "type": "string",
+ "default": "http://127.0.0.1:8888/metrics"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/vernemq/init.go b/modules/vernemq/init.go
new file mode 100644
index 000000000..573b736ed
--- /dev/null
+++ b/modules/vernemq/init.go
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package vernemq
+
+import (
+ "errors"
+
+ "github.com/netdata/go.d.plugin/pkg/prometheus"
+ "github.com/netdata/go.d.plugin/pkg/web"
+)
+
+func (v *VerneMQ) validateConfig() error {
+ if v.URL == "" {
+ return errors.New("url is not set")
+ }
+ return nil
+}
+
+func (v *VerneMQ) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(v.Client)
+ if err != nil {
+ return nil, err
+ }
+
+ return prometheus.New(client, v.Request), nil
+}
diff --git a/modules/vernemq/testdata/config.json b/modules/vernemq/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/vernemq/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/vernemq/testdata/config.yaml b/modules/vernemq/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/vernemq/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/vernemq/vernemq.go b/modules/vernemq/vernemq.go
index d86f3b118..dc6009535 100644
--- a/modules/vernemq/vernemq.go
+++ b/modules/vernemq/vernemq.go
@@ -7,10 +7,9 @@ import (
"errors"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -24,74 +23,70 @@ func init() {
}
func New() *VerneMQ {
- config := Config{
- HTTP: web.HTTP{
- Request: web.Request{
- URL: "http://127.0.0.1:8888/metrics",
- },
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second},
+ return &VerneMQ{
+ Config: Config{
+ HTTP: web.HTTP{
+ Request: web.Request{
+ URL: "http://127.0.0.1:8888/metrics",
+ },
+ Client: web.Client{
+ Timeout: web.Duration(time.Second),
+ },
},
},
- }
-
- return &VerneMQ{
- Config: config,
charts: charts.Copy(),
- cache: make(cache),
+ cache: make(map[string]bool),
}
}
-type (
- Config struct {
- web.HTTP `yaml:",inline"`
- }
+type Config struct {
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+type (
VerneMQ struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- prom prometheus.Prometheus
charts *Charts
- cache cache
- }
- cache map[string]bool
+ prom prometheus.Prometheus
+
+ cache map[string]bool
+ }
)
-func (c cache) hasP(v string) bool { ok := c[v]; c[v] = true; return ok }
+func (v *VerneMQ) Configuration() any {
+ return v.Config
+}
-func (v VerneMQ) validateConfig() error {
- if v.URL == "" {
- return errors.New("URL is not set")
+func (v *VerneMQ) Init() error {
+ if err := v.validateConfig(); err != nil {
+ v.Errorf("error on validating config: %v", err)
+ return err
}
- return nil
-}
-func (v *VerneMQ) initClient() error {
- client, err := web.NewHTTPClient(v.Client)
+ prom, err := v.initPrometheusClient()
if err != nil {
+ v.Error(err)
return err
}
+ v.prom = prom
- v.prom = prometheus.New(client, v.Request)
return nil
}
-func (v *VerneMQ) Init() bool {
- if err := v.validateConfig(); err != nil {
- v.Errorf("error on validating config: %v", err)
- return false
+func (v *VerneMQ) Check() error {
+ mx, err := v.collect()
+ if err != nil {
+ v.Error(err)
+ return err
}
- if err := v.initClient(); err != nil {
- v.Errorf("error on initializing client: %v", err)
- return false
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
}
- return true
-}
-
-func (v *VerneMQ) Check() bool {
- return len(v.Collect()) > 0
+ return nil
}
func (v *VerneMQ) Charts() *Charts {
@@ -110,4 +105,8 @@ func (v *VerneMQ) Collect() map[string]int64 {
return mx
}
-func (VerneMQ) Cleanup() {}
+func (v *VerneMQ) Cleanup() {
+ if v.prom != nil && v.prom.HTTPClient() != nil {
+ v.prom.HTTPClient().CloseIdleConnections()
+ }
+}
diff --git a/modules/vernemq/vernemq_test.go b/modules/vernemq/vernemq_test.go
index 5f07553cd..89498e303 100644
--- a/modules/vernemq/vernemq_test.go
+++ b/modules/vernemq/vernemq_test.go
@@ -9,63 +9,74 @@ import (
"testing"
"github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- metricsV1101MQTTv5, _ = os.ReadFile("testdata/metrics-v1.10.1-mqtt5.txt")
- invalidMetrics, _ = os.ReadFile("testdata/non_vernemq.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer1101MQTTv5Metrics, _ = os.ReadFile("testdata/metrics-v1.10.1-mqtt5.txt")
+ dataUnexpectedMetrics, _ = os.ReadFile("testdata/non_vernemq.txt")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, metricsV1101MQTTv5)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer1101MQTTv5Metrics": dataVer1101MQTTv5Metrics,
+ "dataUnexpectedMetrics": dataUnexpectedMetrics,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestVerneMQ_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VerneMQ{}, dataConfigJSON, dataConfigYAML)
}
func TestVerneMQ_Init(t *testing.T) {
verneMQ := prepareVerneMQ()
- assert.True(t, verneMQ.Init())
+ assert.NoError(t, verneMQ.Init())
}
func TestVerneMQ_Init_ReturnsFalseIfURLIsNotSet(t *testing.T) {
verneMQ := prepareVerneMQ()
verneMQ.URL = ""
- assert.False(t, verneMQ.Init())
+ assert.Error(t, verneMQ.Init())
}
func TestVerneMQ_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
verneMQ := prepareVerneMQ()
verneMQ.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, verneMQ.Init())
+ assert.Error(t, verneMQ.Init())
}
func TestVerneMQ_Check(t *testing.T) {
verneMQ, srv := prepareClientServerV1101(t)
defer srv.Close()
- assert.True(t, verneMQ.Check())
+ assert.NoError(t, verneMQ.Check())
}
func TestVerneMQ_Check_ReturnsFalseIfConnectionRefused(t *testing.T) {
verneMQ := prepareVerneMQ()
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
- assert.False(t, verneMQ.Check())
+ assert.Error(t, verneMQ.Check())
}
func TestVerneMQ_Check_ReturnsFalseIfMetricsAreNotVerneMQ(t *testing.T) {
verneMQ, srv := prepareClientServerNotVerneMQ(t)
defer srv.Close()
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
- assert.False(t, verneMQ.Check())
+ assert.Error(t, verneMQ.Check())
}
func TestVerneMQ_Charts(t *testing.T) {
@@ -87,7 +98,7 @@ func TestVerneMQ_Collect(t *testing.T) {
func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) {
verneMQ := prepareVerneMQ()
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
assert.Nil(t, verneMQ.Collect())
}
@@ -140,12 +151,12 @@ func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) {
t.Helper()
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(metricsV1101MQTTv5)
+ _, _ = w.Write(dataVer1101MQTTv5Metrics)
}))
verneMQ := New()
verneMQ.URL = ts.URL
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
return verneMQ, ts
}
@@ -154,12 +165,12 @@ func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) {
t.Helper()
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(invalidMetrics)
+ _, _ = w.Write(dataUnexpectedMetrics)
}))
verneMQ := New()
verneMQ.URL = ts.URL
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
return verneMQ, ts
}
@@ -173,7 +184,7 @@ func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) {
verneMQ := New()
verneMQ.URL = ts.URL
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
return verneMQ, ts
}
@@ -187,7 +198,7 @@ func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) {
verneMQ := New()
verneMQ.URL = ts.URL
- require.True(t, verneMQ.Init())
+ require.NoError(t, verneMQ.Init())
return verneMQ, ts
}
diff --git a/modules/vsphere/config_schema.json b/modules/vsphere/config_schema.json
index 68bd55e1e..2da6d4a85 100644
--- a/modules/vsphere/config_schema.json
+++ b/modules/vsphere/config_schema.json
@@ -1,77 +1,206 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/vsphere job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "discovery_interval": {
- "type": [
- "string",
- "integer"
- ]
- },
- "host_include": {
- "type": "array",
- "items": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "VMware vCenter Server collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 20
+ },
+ "url": {
+ "title": "URL",
+ "description": "The base URL of the VMware vCenter Server.",
"type": "string"
- }
- },
- "vm_include": {
- "type": "array",
- "items": {
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 20
+ },
+ "discovery_interval": {
+ "title": "Discovery interval",
+        "description": "The interval in seconds between hosts and VMs discovery runs.",
+ "type": "number",
+ "minimum": 60,
+ "default": 300
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "host_include": {
+ "title": "Host selectors",
+ "description": "Defines the hosts for which metrics will be collected based on the provided selector. The selector format follows the pattern '/Datacenter/Cluster/Host', where Datacenter, Cluster, and Host can each be set using Netdata simple patterns.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "title": "Host selector",
+ "description": "",
+ "type": "string",
+ "pattern": "^/"
+ },
+ "default": [
+ "/*"
+ ]
+ },
+ "vm_include": {
+ "title": "Virtual machine selectors",
+ "description": "Defines the virtual machines for which metrics will be collected based on the provided selector. The selector format follows the pattern '/Datacenter/Cluster/Host/VM', where Datacenter, Cluster, Host, and VM can each be set using Netdata simple patterns.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "title": "VM selector",
+ "description": "",
+ "type": "string",
+ "pattern": "^/"
+ },
+ "default": [
+ "/*"
+ ]
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
+ "required": [
+ "url",
+ "username",
+ "password",
+ "host_include",
+ "vm_include"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "proxy_url": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "proxy_username": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
"proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
+ "ui:widget": "password"
},
- "not_follow_redirects": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
+ "host_include": {
+ "ui:listFlavour": "list"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "vm_include": {
+ "ui:listFlavour": "list"
+ },
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "discovery_interval",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Hosts & VMs selector",
+ "fields": [
+ "host_include",
+ "vm_include"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
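
The `host_include`/`vm_include` items above must match the `^/` pattern, i.e. selectors of the form `/Datacenter/Cluster/Host[/VM]` built from Netdata simple patterns. For illustration only (the real matching lives in `modules/vsphere/match`), a sketch of that shape:

```go
package main

import (
	"fmt"
	"strings"
)

// splitSelector illustrates the documented selector shape
// "/Datacenter/Cluster/Host[/VM]": a leading slash (the schema enforces
// the "^/" pattern) followed by slash-separated simple patterns.
func splitSelector(s string) ([]string, error) {
	if !strings.HasPrefix(s, "/") {
		return nil, fmt.Errorf("selector %q must start with '/'", s)
	}
	return strings.Split(strings.TrimPrefix(s, "/"), "/"), nil
}

func main() {
	parts, err := splitSelector("/DC1/Cluster1/host*")
	fmt.Println(parts, err) // [DC1 Cluster1 host*] <nil>

	_, err = splitSelector("invalid")
	fmt.Println(err) // selector "invalid" must start with '/'
}
```
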
diff --git a/modules/vsphere/discover.go b/modules/vsphere/discover.go
index 65555a73b..1ea0a4d6e 100644
--- a/modules/vsphere/discover.go
+++ b/modules/vsphere/discover.go
@@ -14,7 +14,7 @@ func (vs *VSphere) goDiscovery() {
vs.Errorf("error on discovering : %v", err)
}
}
- vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration)
+ vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration())
}
func (vs *VSphere) discoverOnce() error {
diff --git a/modules/vsphere/init.go b/modules/vsphere/init.go
index a0f966220..c17029a6c 100644
--- a/modules/vsphere/init.go
+++ b/modules/vsphere/init.go
@@ -30,7 +30,7 @@ func (vs *VSphere) initClient() (*client.Client, error) {
URL: vs.URL,
User: vs.Username,
Password: vs.Password,
- Timeout: vs.Timeout.Duration,
+ Timeout: vs.Timeout.Duration(),
TLSConfig: vs.Client.TLSConfig,
}
return client.New(config)
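
The `.Duration` field reads become `.Duration()` method calls here because `web.Duration` is now a defined type over `time.Duration` rather than a struct wrapping one, as the constructor changes (`web.Duration(time.Second * 20)`) elsewhere in this diff also show. A sketch of the implied shape — the real type lives in `pkg/web`:

```go
package sketch

import "time"

// Duration mirrors the shape implied by the new call sites: a defined type
// over time.Duration instead of a struct with a Duration field.
type Duration time.Duration

// Duration converts back to time.Duration for standard-library use, which
// is why vs.Timeout.Duration becomes vs.Timeout.Duration() in this diff.
func (d Duration) Duration() time.Duration { return time.Duration(d) }

// Construction, old vs new:
//   Timeout: web.Duration{Duration: time.Second * 20} // struct literal
//   Timeout: web.Duration(time.Second * 20)           // type conversion
```
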
diff --git a/modules/vsphere/testdata/config.json b/modules/vsphere/testdata/config.json
new file mode 100644
index 000000000..3e4a77396
--- /dev/null
+++ b/modules/vsphere/testdata/config.json
@@ -0,0 +1,27 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "discovery_interval": 123.123,
+ "host_include": [
+ "ok"
+ ],
+ "vm_include": [
+ "ok"
+ ]
+}
diff --git a/modules/vsphere/testdata/config.yaml b/modules/vsphere/testdata/config.yaml
new file mode 100644
index 000000000..b9543decd
--- /dev/null
+++ b/modules/vsphere/testdata/config.yaml
@@ -0,0 +1,22 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+discovery_interval: 123.123
+host_include:
+ - "ok"
+vm_include:
+ - "ok"
\ No newline at end of file
diff --git a/modules/vsphere/vsphere.go b/modules/vsphere/vsphere.go
index d7af8a495..cb4839dba 100644
--- a/modules/vsphere/vsphere.go
+++ b/modules/vsphere/vsphere.go
@@ -29,20 +29,18 @@ func init() {
}
func New() *VSphere {
- config := Config{
- HTTP: web.HTTP{
- Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 20},
+ return &VSphere{
+ Config: Config{
+ HTTP: web.HTTP{
+ Client: web.Client{
+ Timeout: web.Duration(time.Second * 20),
+ },
},
+ DiscoveryInterval: web.Duration(time.Minute * 5),
+ HostsInclude: []string{"/*"},
+ VMsInclude: []string{"/*"},
},
- DiscoveryInterval: web.Duration{Duration: time.Minute * 5},
- HostsInclude: []string{"/*"},
- VMsInclude: []string{"/*"},
- }
-
- return &VSphere{
- collectionLock: new(sync.RWMutex),
- Config: config,
+ collectionLock: &sync.RWMutex{},
charts: &module.Charts{},
discoveredHosts: make(map[string]int),
discoveredVMs: make(map[string]int),
@@ -51,17 +49,19 @@ func New() *VSphere {
}
type Config struct {
- web.HTTP `yaml:",inline"`
- DiscoveryInterval web.Duration `yaml:"discovery_interval"`
- HostsInclude match.HostIncludes `yaml:"host_include"`
- VMsInclude match.VMIncludes `yaml:"vm_include"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ DiscoveryInterval web.Duration `yaml:"discovery_interval" json:"discovery_interval"`
+ HostsInclude match.HostIncludes `yaml:"host_include" json:"host_include"`
+ VMsInclude match.VMIncludes `yaml:"vm_include" json:"vm_include"`
}
type (
VSphere struct {
module.Base
- UpdateEvery int `yaml:"update_every"`
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
+ charts *module.Charts
discoverer
scraper
@@ -72,7 +72,6 @@ type (
discoveredHosts map[string]int
discoveredVMs map[string]int
charted map[string]bool
- charts *module.Charts
}
discoverer interface {
Discover() (*rs.Resources, error)
@@ -83,39 +82,41 @@ type (
}
)
-func (vs *VSphere) Init() bool {
+func (vs *VSphere) Configuration() any {
+ return vs.Config
+}
+
+func (vs *VSphere) Init() error {
if err := vs.validateConfig(); err != nil {
vs.Errorf("error on validating config: %v", err)
- return false
+ return err
}
vsClient, err := vs.initClient()
if err != nil {
vs.Errorf("error on creating vsphere client: %v", err)
- return false
+ return err
}
- err = vs.initDiscoverer(vsClient)
- if err != nil {
+ if err := vs.initDiscoverer(vsClient); err != nil {
vs.Errorf("error on creating vsphere discoverer: %v", err)
- return false
+ return err
}
vs.initScraper(vsClient)
- err = vs.discoverOnce()
- if err != nil {
+ if err := vs.discoverOnce(); err != nil {
vs.Errorf("error on discovering: %v", err)
- return false
+ return err
}
vs.goDiscovery()
- return true
+ return nil
}
-func (vs *VSphere) Check() bool {
- return true
+func (vs *VSphere) Check() error {
+ return nil
}
func (vs *VSphere) Charts() *module.Charts {
diff --git a/modules/vsphere/vsphere_test.go b/modules/vsphere/vsphere_test.go
index 97c23d5ba..9439f18fb 100644
--- a/modules/vsphere/vsphere_test.go
+++ b/modules/vsphere/vsphere_test.go
@@ -3,32 +3,46 @@ package vsphere
import (
"crypto/tls"
+ "os"
"strings"
"testing"
"time"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/modules/vsphere/discover"
"github.com/netdata/go.d.plugin/modules/vsphere/match"
rs "github.com/netdata/go.d.plugin/modules/vsphere/resources"
+ "github.com/netdata/go.d.plugin/pkg/web"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vmware/govmomi/performance"
"github.com/vmware/govmomi/simulator"
)
-func TestNew(t *testing.T) {
- job := New()
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
- assert.Implements(t, (*module.Module)(nil), job)
+func TestVSphere_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &VSphere{}, dataConfigJSON, dataConfigYAML)
}
func TestVSphere_Init(t *testing.T) {
vSphere, _, teardown := prepareVSphereSim(t)
defer teardown()
- assert.True(t, vSphere.Init())
+ assert.NoError(t, vSphere.Init())
assert.NotNil(t, vSphere.discoverer)
assert.NotNil(t, vSphere.scraper)
assert.NotNil(t, vSphere.resources)
@@ -41,7 +55,7 @@ func TestVSphere_Init_ReturnsFalseIfURLNotSet(t *testing.T) {
defer teardown()
vSphere.URL = ""
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) {
@@ -49,7 +63,7 @@ func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) {
defer teardown()
vSphere.Username = ""
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) {
@@ -57,7 +71,7 @@ func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) {
defer teardown()
vSphere.Password = ""
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
@@ -65,7 +79,7 @@ func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) {
defer teardown()
vSphere.Client.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) {
@@ -73,7 +87,7 @@ func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) {
defer teardown()
vSphere.URL = "http://127.0.0.1:32001"
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) {
@@ -81,16 +95,16 @@ func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) {
defer teardown()
vSphere.HostsInclude = match.HostIncludes{"invalid"}
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
vSphere.HostsInclude = vSphere.HostsInclude[:0]
vSphere.VMsInclude = match.VMIncludes{"invalid"}
- assert.False(t, vSphere.Init())
+ assert.Error(t, vSphere.Init())
}
func TestVSphere_Check(t *testing.T) {
- assert.NotNil(t, New().Check())
+ assert.NoError(t, New().Check())
}
func TestVSphere_Charts(t *testing.T) {
@@ -101,7 +115,7 @@ func TestVSphere_Cleanup(t *testing.T) {
vSphere, _, teardown := prepareVSphereSim(t)
defer teardown()
- require.True(t, vSphere.Init())
+ require.NoError(t, vSphere.Init())
vSphere.Cleanup()
time.Sleep(time.Second)
@@ -117,7 +131,7 @@ func TestVSphere_Collect(t *testing.T) {
vSphere, model, teardown := prepareVSphereSim(t)
defer teardown()
- require.True(t, vSphere.Init())
+ require.NoError(t, vSphere.Init())
vSphere.scraper = mockScraper{vSphere.scraper}
@@ -332,8 +346,8 @@ func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) {
vSphere, _, teardown := prepareVSphereSim(t)
defer teardown()
- require.True(t, vSphere.Init())
- require.True(t, vSphere.Check())
+ require.NoError(t, vSphere.Init())
+ require.NoError(t, vSphere.Check())
okHostID := "host-50"
okVMID := "vm-64"
@@ -387,9 +401,9 @@ func TestVSphere_Collect_Run(t *testing.T) {
vSphere, model, teardown := prepareVSphereSim(t)
defer teardown()
- vSphere.DiscoveryInterval.Duration = time.Second * 2
- require.True(t, vSphere.Init())
- require.True(t, vSphere.Check())
+ vSphere.DiscoveryInterval = web.Duration(time.Second * 2)
+ require.NoError(t, vSphere.Init())
+ require.NoError(t, vSphere.Check())
runs := 20
for i := 0; i < runs; i++ {
diff --git a/modules/weblog/config_schema.json b/modules/weblog/config_schema.json
index 82b6c358c..7673912d6 100644
--- a/modules/weblog/config_schema.json
+++ b/modules/weblog/config_schema.json
@@ -1,208 +1,388 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/web_log job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "parser": {
- "type": "object",
- "properties": {
- "log_type": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update frequency",
+ "description": "The data collection frequency in seconds.",
+ "minimum": 1,
+ "default": 1,
+ "type": "integer"
+ },
+ "path": {
+ "title": "Log file path",
+        "description": "The path to the web server log file.",
+ "type": "string",
+ "default": "/var/log/nginx/access.log"
+ },
+ "exclude_path": {
+ "title": "Exclude path",
+ "description": "Pattern to exclude log files.",
+ "type": "string",
+ "default": "*.gz"
+ },
+ "histogram": {
+ "title": "Request processing time histogram",
+ "description": "Buckets for the histogram in milliseconds.",
+ "type": "array",
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
},
- "csv_config": {
+ "uniqueItems": true
+ },
+ "log_type": {
+ "title": "Log parser",
+        "description": "The type of parser to use for parsing the web server log file.",
+ "type": "string",
+ "enum": [
+ "auto",
+ "csv",
+ "regexp",
+ "json",
+ "ltsv"
+ ],
+ "default": "auto"
+ },
+ "url_patterns": {
+ "title": "URL patterns",
+ "description": "Patterns used to match against the full original request URI. For each pattern, the web log will collect responses by status code, method, bandwidth, and processing time.",
+ "type": "array",
+ "items": {
+ "title": "Patterns",
"type": "object",
"properties": {
- "fields_per_record": {
- "type": "integer"
- },
- "delimiter": {
+ "name": {
+ "title": "Dimension",
+ "description": "A unique name used as a dimension name for the pattern.",
"type": "string"
},
- "trim_leading_space": {
- "type": "boolean"
- },
- "format": {
+ "match": {
+ "title": "Pattern",
+ "description": "The pattern string used to match against the full original request URI.",
"type": "string"
}
},
"required": [
- "fields_per_record",
- "delimiter",
- "trim_leading_space",
- "format"
+ "name",
+ "match"
]
},
- "ltsv_config": {
+ "uniqueItems": true
+ },
+ "custom_fields": {
+ "title": "Custom fields",
+        "description": "Configuration for custom fields. The field value is expected to be a string. Patterns are matched against the value of the specified field. For each pattern, the web log will collect responses by status code.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "title": "Field configuration",
"type": "object",
"properties": {
- "field_delimiter": {
- "type": "string"
- },
- "value_delimiter": {
+ "name": {
+ "title": "Field name",
+ "description": "The name of the custom field.",
"type": "string"
},
- "mapping": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
+ "patterns": {
+ "title": "Patterns",
+ "description": "",
+ "type": "array",
+ "items": {
+ "title": "User patterns",
+ "type": "object",
+ "properties": {
+ "name": {
+ "title": "Dimension",
+ "description": "A unique name used as a dimension name for the pattern.",
+ "type": "string"
+ },
+ "match": {
+ "title": "Pattern",
+ "description": "The pattern string used to match against the field value.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "match"
+ ]
}
}
},
"required": [
- "field_delimiter",
- "value_delimiter",
- "mapping"
+ "name",
+ "patterns"
]
- },
- "regexp_config": {
+ }
+ },
+ "custom_time_fields": {
+ "title": "Custom time fields",
+        "description": "Configuration for custom time fields. The field value is expected to be numeric and represent time. For each field, the web log will calculate the minimum, average, and maximum values, and a histogram.",
+ "type": "array",
+ "items": {
+ "title": "Field configuration",
"type": "object",
"properties": {
- "pattern": {
+ "name": {
+              "title": "Field name",
+ "description": "The name of the custom time field.",
"type": "string"
+ },
+ "histogram": {
+ "title": "Histogram",
+ "description": "Buckets for the histogram in milliseconds.",
+ "type": "array",
+ "uniqueItems": true,
+ "items": {
+ "title": "Bucket",
+ "type": "number",
+ "exclusiveMinimum": 0
+ },
+ "default": [
+ 0.005,
+ 0.01,
+ 0.025,
+ 0.05,
+ 0.1,
+ 0.25,
+ 0.5,
+ 1,
+ 2.5,
+ 5,
+ 10
+ ]
}
},
"required": [
- "pattern"
+ "name"
]
- },
- "json_config": {
+ }
+ },
+ "custom_numeric_fields": {
+        "title": "Custom numeric fields",
+        "description": "Configuration for custom numeric fields. The field value is expected to be numeric. For each field, the web log will calculate the minimum, average, and maximum values.",
+ "type": "array",
+ "items": {
+ "title": "Field configuration",
"type": "object",
"properties": {
- "mapping": {
- "type": "object",
- "additionalProperties": {
- "type": "string"
- }
+ "name": {
+ "title": "Name",
+ "description": "The name of the custom numeric field.",
+ "type": "string"
+ },
+ "units": {
+ "title": "Units",
+ "description": "The unit label for the vertical axis on charts.",
+ "type": "string"
+ },
+ "multiplier": {
+ "title": "Multiplier",
+ "description": "A value to multiply the field value.",
+ "type": "number",
+ "not": {
+ "const": 0
+ },
+ "default": 1
+ },
+ "divisor": {
+ "title": "Divisor",
+ "description": "A value to divide the field value.",
+ "type": "number",
+ "not": {
+ "const": 0
+ },
+ "default": 1
}
},
"required": [
- "mapping"
+ "name",
+ "units",
+ "multiplier",
+ "divisor"
]
}
- },
- "required": [
- "log_type"
- ]
- },
- "path": {
- "type": "string"
- },
- "exclude_path": {
- "type": "string"
- },
- "url_patterns": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "match": {
- "type": "string"
- }
- },
- "required": [
- "name",
- "match"
- ]
}
},
- "custom_fields": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "required": [
+ "path",
+ "log_type"
+ ],
+ "dependencies": {
+ "log_type": {
+ "oneOf": [
+ {
+ "properties": {
+ "log_type": {
+ "const": "auto"
+ }
+ }
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "csv"
+ },
+ "csv_config": {
+ "title": "CSV parser configuration",
+ "type": "object",
+ "properties": {
+ "format": {
+ "title": "Format",
+ "description": "Log format.",
+ "type": "string",
+ "default": "$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent"
+ },
+ "delimiter": {
+ "title": "Delimiter",
+ "description": "Delimiter used to separate fields in the log file. Default: space (' ').",
+ "type": "string",
+ "default": " "
+ }
+ },
+ "required": [
+ "format",
+ "delimiter"
+ ]
+ }
+ },
+ "required": [
+ "csv_config"
+ ]
},
- "patterns": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ {
+ "properties": {
+ "log_type": {
+ "const": "regexp"
+ },
+ "regexp_config": {
+ "title": "Regular expression parser configuration",
+ "type": "object",
+ "properties": {
+ "pattern": {
+ "title": "Pattern with named groups",
+ "description": "Regular expression pattern with named groups. Use named groups for known fields.",
+ "type": "string",
+ "default": ""
+ }
},
- "match": {
- "type": "string"
- }
+ "required": [
+ "pattern"
+ ]
+ }
+ },
+ "required": [
+ "regexp_config"
+ ]
+ },
+ {
+ "properties": {
+ "log_type": {
+ "const": "json"
},
- "required": [
- "name",
- "match"
- ]
+ "json_config": {
+ "title": "JSON parser configuration",
+ "type": "object",
+ "properties": {
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
}
- }
- },
- "required": [
- "name",
- "patterns"
- ]
- }
- },
- "custom_time_fields": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
},
- "histogram": {
- "type": "array",
- "items": {
- "type": "number"
+ {
+ "properties": {
+ "log_type": {
+ "const": "ltsv"
+ },
+ "ltsv_config": {
+ "title": "LTSV parser configuration",
+ "type": "object",
+ "properties": {
+ "field_delimiter": {
+ "title": "Field delimiter",
+ "description": "Delimiter used to separate fields in LTSV logs. Default: tab ('\\t').",
+ "type": "string",
+ "default": "\t"
+ },
+ "value_delimiter": {
+ "title": "Value delimiter",
+ "description": "Delimiter used to separate label-value pairs in LTSV logs.",
+ "type": "string",
+ "default": ":"
+ },
+ "mapping": {
+ "title": "Field mapping",
+ "description": "Dictionary mapping fields in logs to known fields.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ }
+ }
+ }
}
}
- },
- "required": [
- "name",
- "histogram"
- ]
- }
- },
- "custom_numeric_fields": {
- "type": "array",
- "items": {
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "units": {
- "type": "string"
- },
- "multiplier": {
- "type": "integer"
- },
- "divisor": {
- "type": "integer"
- }
- },
- "required": [
- "name",
- "units",
- "multiplier",
- "divisor"
]
}
+ }
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
- "histogram": {
- "type": "array",
- "items": {
- "type": "number"
+ "log_type": {
+ "ui:widget": "radio",
+ "ui:options": {
+ "inline": true
}
},
- "group_response_codes": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "path",
+ "exclude_path",
+ "histogram"
+ ]
+ },
+ {
+ "title": "Parser",
+ "fields": [
+ "log_type",
+ "csv_config",
+ "ltsv_config",
+ "regexp_config",
+ "json_config"
+ ]
+ },
+ {
+ "title": "URL patterns",
+ "fields": [
+ "url_patterns"
+ ]
+ },
+ {
+ "title": "Custom fields",
+ "fields": [
+ "custom_fields",
+ "custom_time_fields",
+ "custom_numeric_fields"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "path"
- ]
+ }
}
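
The weblog schema above models parser configuration as dependent schemas: `dependencies.log_type.oneOf` ties each `log_type` value to its own config block, so for example `"csv"` requires `csv_config`. A minimal Go sketch of that one rule, for illustration only — in practice a JSON Schema validator enforces it:

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// parserCfg captures just the two fields needed to illustrate the
// dependent-schema rule; the real config has many more.
type parserCfg struct {
	LogType   string          `json:"log_type"`
	CSVConfig json.RawMessage `json:"csv_config"`
}

func validate(raw []byte) error {
	var cfg parserCfg
	if err := json.Unmarshal(raw, &cfg); err != nil {
		return err
	}
	if cfg.LogType == "csv" && cfg.CSVConfig == nil {
		return errors.New(`log_type "csv" requires csv_config`)
	}
	return nil
}

func main() {
	fmt.Println(validate([]byte(`{"log_type":"csv"}`)))                 // error: csv without csv_config
	fmt.Println(validate([]byte(`{"log_type":"csv","csv_config":{}}`))) // <nil>
}
```
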
diff --git a/modules/weblog/parser.go b/modules/weblog/parser.go
index 11a6d2832..07cbc2970 100644
--- a/modules/weblog/parser.go
+++ b/modules/weblog/parser.go
@@ -73,7 +73,7 @@ var (
)
func (w *WebLog) newParser(record []byte) (logs.Parser, error) {
- if w.Parser.LogType == typeAuto {
+ if w.ParserConfig.LogType == typeAuto {
w.Debugf("log_type is %s, will try format auto-detection", typeAuto)
if len(record) == 0 {
return nil, fmt.Errorf("empty line, can't auto-detect format (%s)", w.file.CurrentFilename())
@@ -81,30 +81,30 @@ func (w *WebLog) newParser(record []byte) (logs.Parser, error) {
return w.guessParser(record)
}
- w.Parser.CSV.Format = cleanApacheLogFormat(w.Parser.CSV.Format)
- w.Debugf("log_type is %s, skipping auto-detection", w.Parser.LogType)
- switch w.Parser.LogType {
+ w.ParserConfig.CSV.Format = cleanApacheLogFormat(w.ParserConfig.CSV.Format)
+ w.Debugf("log_type is %s, skipping auto-detection", w.ParserConfig.LogType)
+ switch w.ParserConfig.LogType {
case logs.TypeCSV:
- w.Debugf("config: %+v", w.Parser.CSV)
+ w.Debugf("config: %+v", w.ParserConfig.CSV)
case logs.TypeLTSV:
- w.Debugf("config: %+v", w.Parser.LogType)
+ w.Debugf("config: %+v", w.ParserConfig.LogType)
case logs.TypeRegExp:
- w.Debugf("config: %+v", w.Parser.RegExp)
+ w.Debugf("config: %+v", w.ParserConfig.RegExp)
case logs.TypeJSON:
- w.Debugf("config: %+v", w.Parser.JSON)
+ w.Debugf("config: %+v", w.ParserConfig.JSON)
}
- return logs.NewParser(w.Parser, w.file)
+ return logs.NewParser(w.ParserConfig, w.file)
}
func (w *WebLog) guessParser(record []byte) (logs.Parser, error) {
w.Debug("starting log type auto-detection")
if reLTSV.Match(record) {
w.Debug("log type is LTSV")
- return logs.NewLTSVParser(w.Parser.LTSV, w.file)
+ return logs.NewLTSVParser(w.ParserConfig.LTSV, w.file)
}
if reJSON.Match(record) {
w.Debug("log type is JSON")
- return logs.NewJSONParser(w.Parser.JSON, w.file)
+ return logs.NewJSONParser(w.ParserConfig.JSON, w.file)
}
w.Debug("log type is CSV")
return w.guessCSVParser(record)
@@ -112,10 +112,10 @@ func (w *WebLog) guessParser(record []byte) (logs.Parser, error) {
func (w *WebLog) guessCSVParser(record []byte) (logs.Parser, error) {
w.Debug("starting csv log format auto-detection")
- w.Debugf("config: %+v", w.Parser.CSV)
+ w.Debugf("config: %+v", w.ParserConfig.CSV)
for _, format := range guessOrder {
format = cleanCSVFormat(format)
- cfg := w.Parser.CSV
+ cfg := w.ParserConfig.CSV
cfg.Format = format
w.Debugf("trying format: '%s'", format)
diff --git a/modules/weblog/parser_test.go b/modules/weblog/parser_test.go
index 4e449b60c..bd1a63af6 100644
--- a/modules/weblog/parser_test.go
+++ b/modules/weblog/parser_test.go
@@ -218,7 +218,7 @@ func prepareWebLog() *WebLog {
return &WebLog{
Config: Config{
GroupRespCodes: false,
- Parser: cfg,
+ ParserConfig: cfg,
},
}
}
diff --git a/modules/weblog/testdata/config.json b/modules/weblog/testdata/config.json
new file mode 100644
index 000000000..80b51736d
--- /dev/null
+++ b/modules/weblog/testdata/config.json
@@ -0,0 +1,64 @@
+{
+ "update_every": 123,
+ "path": "ok",
+ "exclude_path": "ok",
+ "log_type": "ok",
+ "csv_config": {
+ "fields_per_record": 123,
+ "delimiter": "ok",
+ "trim_leading_space": true,
+ "format": "ok"
+ },
+ "ltsv_config": {
+ "field_delimiter": "ok",
+ "value_delimiter": "ok",
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "regexp_config": {
+ "pattern": "ok"
+ },
+ "json_config": {
+ "mapping": {
+ "ok": "ok"
+ }
+ },
+ "url_patterns": [
+ {
+ "name": "ok",
+ "match": "ok"
+ }
+ ],
+ "custom_fields": [
+ {
+ "name": "ok",
+ "patterns": [
+ {
+ "name": "ok",
+ "match": "ok"
+ }
+ ]
+ }
+ ],
+ "custom_time_fields": [
+ {
+ "name": "ok",
+ "histogram": [
+ 123.123
+ ]
+ }
+ ],
+ "custom_numeric_fields": [
+ {
+ "name": "ok",
+ "units": "ok",
+ "multiplier": 123,
+ "divisor": 123
+ }
+ ],
+ "histogram": [
+ 123.123
+ ],
+ "group_response_codes": true
+}
diff --git a/modules/weblog/testdata/config.yaml b/modules/weblog/testdata/config.yaml
new file mode 100644
index 000000000..64f60763a
--- /dev/null
+++ b/modules/weblog/testdata/config.yaml
@@ -0,0 +1,39 @@
+update_every: 123
+path: "ok"
+exclude_path: "ok"
+log_type: "ok"
+csv_config:
+ fields_per_record: 123
+ delimiter: "ok"
+ trim_leading_space: yes
+ format: "ok"
+ltsv_config:
+ field_delimiter: "ok"
+ value_delimiter: "ok"
+ mapping:
+ ok: "ok"
+regexp_config:
+ pattern: "ok"
+json_config:
+ mapping:
+ ok: "ok"
+url_patterns:
+ - name: "ok"
+ match: "ok"
+custom_fields:
+ - name: "ok"
+ patterns:
+ - name: "ok"
+ match: "ok"
+custom_time_fields:
+ - name: "ok"
+ histogram:
+ - 123.123
+custom_numeric_fields:
+ - name: "ok"
+ units: "ok"
+ multiplier: 123
+ divisor: 123
+histogram:
+ - 123.123
+group_response_codes: yes
diff --git a/modules/weblog/weblog.go b/modules/weblog/weblog.go
index 27bf43f9a..910612ded 100644
--- a/modules/weblog/weblog.go
+++ b/modules/weblog/weblog.go
@@ -24,7 +24,7 @@ func New() *WebLog {
Config: Config{
ExcludePath: "*.gz",
GroupRespCodes: true,
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: typeAuto,
CSV: logs.CSVConfig{
FieldsPerRecord: -1,
@@ -45,67 +45,73 @@ func New() *WebLog {
type (
Config struct {
- Parser logs.ParserConfig `yaml:",inline"`
- Path string `yaml:"path"`
- ExcludePath string `yaml:"exclude_path"`
- URLPatterns []userPattern `yaml:"url_patterns"`
- CustomFields []customField `yaml:"custom_fields"`
- CustomTimeFields []customTimeField `yaml:"custom_time_fields"`
- CustomNumericFields []customNumericField `yaml:"custom_numeric_fields"`
- Histogram []float64 `yaml:"histogram"`
- GroupRespCodes bool `yaml:"group_response_codes"`
+ logs.ParserConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Path string `yaml:"path" json:"path"`
+ ExcludePath string `yaml:"exclude_path" json:"exclude_path"`
+ URLPatterns []userPattern `yaml:"url_patterns" json:"url_patterns"`
+ CustomFields []customField `yaml:"custom_fields" json:"custom_fields"`
+ CustomTimeFields []customTimeField `yaml:"custom_time_fields" json:"custom_time_fields"`
+ CustomNumericFields []customNumericField `yaml:"custom_numeric_fields" json:"custom_numeric_fields"`
+ Histogram []float64 `yaml:"histogram" json:"histogram"`
+ GroupRespCodes bool `yaml:"group_response_codes" json:"group_response_codes"`
}
userPattern struct {
- Name string `yaml:"name"`
- Match string `yaml:"match"`
+ Name string `yaml:"name" json:"name"`
+ Match string `yaml:"match" json:"match"`
}
customField struct {
- Name string `yaml:"name"`
- Patterns []userPattern `yaml:"patterns"`
+ Name string `yaml:"name" json:"name"`
+ Patterns []userPattern `yaml:"patterns" json:"patterns"`
}
customTimeField struct {
- Name string `yaml:"name"`
- Histogram []float64 `yaml:"histogram"`
+ Name string `yaml:"name" json:"name"`
+ Histogram []float64 `yaml:"histogram" json:"histogram"`
}
customNumericField struct {
- Name string `yaml:"name"`
- Units string `yaml:"units"`
- Multiplier int `yaml:"multiplier"`
- Divisor int `yaml:"divisor"`
+ Name string `yaml:"name" json:"name"`
+ Units string `yaml:"units" json:"units"`
+ Multiplier int `yaml:"multiplier" json:"multiplier"`
+ Divisor int `yaml:"divisor" json:"divisor"`
}
)
type WebLog struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
- file *logs.Reader
- parser logs.Parser
- line *logLine
- urlPatterns []*pattern
+ charts *module.Charts
+
+ file *logs.Reader
+ parser logs.Parser
+ line *logLine
+ urlPatterns []*pattern
customFields map[string][]*pattern
customTimeFields map[string][]float64
customNumericFields map[string]bool
- charts *module.Charts
- mx *metricsData
+ mx *metricsData
}
-func (w *WebLog) Init() bool {
+func (w *WebLog) Configuration() any {
+ return w.Config
+}
+
+func (w *WebLog) Init() error {
if err := w.createURLPatterns(); err != nil {
w.Errorf("init failed: %v", err)
- return false
+ return err
}
if err := w.createCustomFields(); err != nil {
w.Errorf("init failed: %v", err)
- return false
+ return err
}
if err := w.createCustomTimeFields(); err != nil {
w.Errorf("init failed: %v", err)
- return false
+ return err
}
if err := w.createCustomNumericFields(); err != nil {
@@ -115,26 +121,27 @@ func (w *WebLog) Init() bool {
w.createLogLine()
w.mx = newMetricsData(w.Config)
- return true
+ return nil
}
-func (w *WebLog) Check() bool {
+func (w *WebLog) Check() error {
// Note: these inits are here to make auto-detection retry working
if err := w.createLogReader(); err != nil {
w.Warning("check failed: ", err)
- return false
+ return err
}
if err := w.createParser(); err != nil {
w.Warning("check failed: ", err)
- return false
+ return err
}
if err := w.createCharts(w.line); err != nil {
w.Warning("check failed: ", err)
- return false
+ return err
}
- return true
+
+ return nil
}
func (w *WebLog) Charts() *module.Charts {
diff --git a/modules/weblog/weblog_test.go b/modules/weblog/weblog_test.go
index 6195d2e49..9ddf07be8 100644
--- a/modules/weblog/weblog_test.go
+++ b/modules/weblog/weblog_test.go
@@ -11,98 +11,107 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/logs"
"github.com/netdata/go.d.plugin/pkg/metrics"
- "github.com/netdata/go.d.plugin/agent/module"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testCommonLog, _ = os.ReadFile("testdata/common.log")
- testFullLog, _ = os.ReadFile("testdata/full.log")
- testCustomLog, _ = os.ReadFile("testdata/custom.log")
- testCustomTimeFieldLog, _ = os.ReadFile("testdata/custom_time_fields.log")
- testIISLog, _ = os.ReadFile("testdata/u_ex221107.log")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataCommonLog, _ = os.ReadFile("testdata/common.log")
+ dataFullLog, _ = os.ReadFile("testdata/full.log")
+ dataCustomLog, _ = os.ReadFile("testdata/custom.log")
+ dataCustomTimeFieldLog, _ = os.ReadFile("testdata/custom_time_fields.log")
+ dataIISLog, _ = os.ReadFile("testdata/u_ex221107.log")
)
-func Test_readTestData(t *testing.T) {
- assert.NotNil(t, testFullLog)
- assert.NotNil(t, testCommonLog)
- assert.NotNil(t, testCustomLog)
- assert.NotNil(t, testCustomTimeFieldLog)
- assert.NotNil(t, testIISLog)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataCommonLog": dataCommonLog,
+ "dataFullLog": dataFullLog,
+ "dataCustomLog": dataCustomLog,
+ "dataCustomTimeFieldLog": dataCustomTimeFieldLog,
+ "dataIISLog": dataIISLog,
+ } {
+ require.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- assert.Implements(t, (*module.Module)(nil), New())
+func TestWebLog_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WebLog{}, dataConfigJSON, dataConfigYAML)
}
func TestWebLog_Init(t *testing.T) {
weblog := New()
- assert.True(t, weblog.Init())
+ assert.NoError(t, weblog.Init())
}
func TestWebLog_Init_ErrorOnCreatingURLPatterns(t *testing.T) {
weblog := New()
weblog.URLPatterns = []userPattern{{Match: "* !*"}}
- assert.False(t, weblog.Init())
+ assert.Error(t, weblog.Init())
}
func TestWebLog_Init_ErrorOnCreatingCustomFields(t *testing.T) {
weblog := New()
weblog.CustomFields = []customField{{Patterns: []userPattern{{Name: "p1", Match: "* !*"}}}}
- assert.False(t, weblog.Init())
+ assert.Error(t, weblog.Init())
}
func TestWebLog_Check(t *testing.T) {
weblog := New()
defer weblog.Cleanup()
weblog.Path = "testdata/common.log"
- require.True(t, weblog.Init())
+ require.NoError(t, weblog.Init())
- assert.True(t, weblog.Check())
+ assert.NoError(t, weblog.Check())
}
func TestWebLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) {
weblog := New()
defer weblog.Cleanup()
weblog.Path = "testdata/not_exists.log"
- require.True(t, weblog.Init())
+ require.NoError(t, weblog.Init())
- assert.False(t, weblog.Check())
+ assert.Error(t, weblog.Check())
}
func TestWebLog_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) {
weblog := New()
defer weblog.Cleanup()
weblog.Path = "testdata/custom.log"
- require.True(t, weblog.Init())
+ require.NoError(t, weblog.Init())
- assert.False(t, weblog.Check())
+ assert.Error(t, weblog.Check())
}
func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) {
weblog := New()
defer weblog.Cleanup()
weblog.Path = "testdata/custom.log"
- weblog.Parser.LogType = logs.TypeCSV
- weblog.Parser.CSV.Format = "$one $two"
- require.True(t, weblog.Init())
+ weblog.ParserConfig.LogType = logs.TypeCSV
+ weblog.ParserConfig.CSV.Format = "$one $two"
+ require.NoError(t, weblog.Init())
- assert.False(t, weblog.Check())
+ assert.Error(t, weblog.Check())
}
func TestWebLog_Charts(t *testing.T) {
weblog := New()
defer weblog.Cleanup()
weblog.Path = "testdata/common.log"
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
assert.NotNil(t, weblog.Charts())
}
@@ -1142,7 +1151,7 @@ func prepareWebLogCollectFull(t *testing.T) *WebLog {
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
FieldsPerRecord: -1,
@@ -1187,11 +1196,11 @@ func prepareWebLogCollectFull(t *testing.T) *WebLog {
}
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testFullLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataFullLog))
require.NoError(t, err)
weblog.parser = p
return weblog
@@ -1210,7 +1219,7 @@ func prepareWebLogCollectCommon(t *testing.T) *WebLog {
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
FieldsPerRecord: -1,
@@ -1230,11 +1239,11 @@ func prepareWebLogCollectCommon(t *testing.T) *WebLog {
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCommonLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCommonLog))
require.NoError(t, err)
weblog.parser = p
return weblog
@@ -1248,7 +1257,7 @@ func prepareWebLogCollectCustom(t *testing.T) *WebLog {
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
FieldsPerRecord: 2,
@@ -1282,11 +1291,11 @@ func prepareWebLogCollectCustom(t *testing.T) *WebLog {
}
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomLog))
require.NoError(t, err)
weblog.parser = p
return weblog
@@ -1300,7 +1309,7 @@ func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog {
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
FieldsPerRecord: 2,
@@ -1328,11 +1337,11 @@ func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog {
}
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomTimeFieldLog))
require.NoError(t, err)
weblog.parser = p
return weblog
@@ -1346,7 +1355,7 @@ func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog {
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
FieldsPerRecord: 2,
@@ -1374,11 +1383,11 @@ func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog {
}
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataCustomTimeFieldLog))
require.NoError(t, err)
weblog.parser = p
return weblog
@@ -1404,7 +1413,7 @@ func prepareWebLogCollectIISFields(t *testing.T) *WebLog {
"$request_time", // time-taken
}, " ")
cfg := Config{
- Parser: logs.ParserConfig{
+ ParserConfig: logs.ParserConfig{
LogType: logs.TypeCSV,
CSV: logs.CSVConfig{
// Users can define the number of fields
@@ -1424,11 +1433,11 @@ func prepareWebLogCollectIISFields(t *testing.T) *WebLog {
weblog := New()
weblog.Config = cfg
- require.True(t, weblog.Init())
- require.True(t, weblog.Check())
+ require.NoError(t, weblog.Init())
+ require.NoError(t, weblog.Check())
defer weblog.Cleanup()
- p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testIISLog))
+ p, err := logs.NewCSVParser(weblog.ParserConfig.CSV, bytes.NewReader(dataIISLog))
require.NoError(t, err)
weblog.parser = p
return weblog
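
Every module test file in this diff gains testdata/config.json and testdata/config.yaml fixtures plus a call to module.TestConfigurationSerialize. That helper's body is not part of this diff; below is a plausible shape only, assuming it round-trips the module's configuration through both encoders (gopkg.in/yaml.v2 is assumed here because the plugin uses it elsewhere):

package demo

import (
	"encoding/json"
	"testing"

	"gopkg.in/yaml.v2"
)

// roundTripSerialize is a hypothetical stand-in for
// module.TestConfigurationSerialize: the config must decode from both
// fixtures and encode back without error. cfg must be a pointer.
func roundTripSerialize(t *testing.T, cfg any, dataJSON, dataYAML []byte) {
	t.Helper()

	if err := json.Unmarshal(dataJSON, cfg); err != nil {
		t.Fatalf("json unmarshal: %v", err)
	}
	if _, err := json.Marshal(cfg); err != nil {
		t.Fatalf("json marshal: %v", err)
	}
	if err := yaml.Unmarshal(dataYAML, cfg); err != nil {
		t.Fatalf("yaml unmarshal: %v", err)
	}
	if _, err := yaml.Marshal(cfg); err != nil {
		t.Fatalf("yaml marshal: %v", err)
	}
}
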
diff --git a/modules/whoisquery/config_schema.json b/modules/whoisquery/config_schema.json
index 9f5131789..4461b70a8 100644
--- a/modules/whoisquery/config_schema.json
+++ b/modules/whoisquery/config_schema.json
@@ -1,29 +1,53 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/whoisquery job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "WHOIS query collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "source": {
+ "title": "Domain",
+ "description": "The domain for which WHOIS queries will be performed.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the WHOIS query.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "days_until_expiration_warning": {
+ "title": "Days until warning",
+ "description": "Number of days before the alarm status is set to warning.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 90
+ },
+ "days_until_expiration_critical": {
+ "title": "Days until critical",
+ "description": "Number of days before the alarm status is set to critical.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 30
+ }
},
- "source": {
- "type": "string"
+ "required": [
+ "source"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "days_until_expiration_warning": {
- "type": "integer"
- },
- "days_until_expiration_critical": {
- "type": "integer"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
}
- },
- "required": [
- "name",
- "source"
- ]
+ }
}
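
Each config_schema.json in this diff is rewritten from a bare JSON Schema into an envelope carrying both the validation schema (jsonSchema) and the UI hints (uiSchema). A small standard-library-only sketch for sanity-checking the new layout (the file path is just an example):

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// envelope mirrors the new config_schema.json layout: the validation
// schema and the UI hints travel together in one document.
type envelope struct {
	JSONSchema json.RawMessage `json:"jsonSchema"`
	UISchema   json.RawMessage `json:"uiSchema"`
}

func main() {
	raw, err := os.ReadFile("modules/whoisquery/config_schema.json")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	var e envelope
	if err := json.Unmarshal(raw, &e); err != nil || e.JSONSchema == nil {
		fmt.Fprintln(os.Stderr, "not a jsonSchema/uiSchema envelope")
		os.Exit(1)
	}
	fmt.Println("envelope OK, uiSchema present:", e.UISchema != nil)
}
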
diff --git a/modules/whoisquery/provider.go b/modules/whoisquery/provider.go
index 71318dd81..032f979f4 100644
--- a/modules/whoisquery/provider.go
+++ b/modules/whoisquery/provider.go
@@ -23,7 +23,7 @@ type fromNet struct {
func newProvider(config Config) (provider, error) {
domain := config.Source
client := whois.NewClient()
- client.SetTimeout(config.Timeout.Duration)
+ client.SetTimeout(config.Timeout.Duration())
return &fromNet{
domainAddress: domain,
diff --git a/modules/whoisquery/testdata/config.json b/modules/whoisquery/testdata/config.json
new file mode 100644
index 000000000..e633bd4ed
--- /dev/null
+++ b/modules/whoisquery/testdata/config.json
@@ -0,0 +1,7 @@
+{
+ "update_every": 123,
+ "source": "ok",
+ "timeout": 123.123,
+ "days_until_expiration_warning": 123,
+ "days_until_expiration_critical": 123
+}
diff --git a/modules/whoisquery/testdata/config.yaml b/modules/whoisquery/testdata/config.yaml
new file mode 100644
index 000000000..ad4c501c0
--- /dev/null
+++ b/modules/whoisquery/testdata/config.yaml
@@ -0,0 +1,5 @@
+update_every: 123
+source: "ok"
+timeout: 123.123
+days_until_expiration_warning: 123
+days_until_expiration_critical: 123
diff --git a/modules/whoisquery/whoisquery.go b/modules/whoisquery/whoisquery.go
index 6265b4fb6..fe372dbbf 100644
--- a/modules/whoisquery/whoisquery.go
+++ b/modules/whoisquery/whoisquery.go
@@ -4,6 +4,7 @@ package whoisquery
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -26,7 +27,7 @@ func init() {
func New() *WhoisQuery {
return &WhoisQuery{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 5),
DaysUntilWarn: 90,
DaysUntilCrit: 30,
},
@@ -34,41 +35,54 @@ func New() *WhoisQuery {
}
type Config struct {
- Source string
- Timeout web.Duration `yaml:"timeout"`
- DaysUntilWarn int64 `yaml:"days_until_expiration_warning"`
- DaysUntilCrit int64 `yaml:"days_until_expiration_critical"`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Source string `yaml:"source" json:"source"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ DaysUntilWarn int64 `yaml:"days_until_expiration_warning" json:"days_until_expiration_warning"`
+ DaysUntilCrit int64 `yaml:"days_until_expiration_critical" json:"days_until_expiration_critical"`
}
type WhoisQuery struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
prov provider
}
-func (w *WhoisQuery) Init() bool {
+func (w *WhoisQuery) Configuration() any {
+ return w.Config
+}
+
+func (w *WhoisQuery) Init() error {
if err := w.validateConfig(); err != nil {
w.Errorf("config validation: %v", err)
- return false
+ return err
}
prov, err := w.initProvider()
if err != nil {
w.Errorf("init whois provider: %v", err)
- return false
+ return err
}
w.prov = prov
w.charts = w.initCharts()
- return true
+ return nil
}
-func (w *WhoisQuery) Check() bool {
- return len(w.Collect()) > 0
+func (w *WhoisQuery) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (w *WhoisQuery) Charts() *module.Charts {
diff --git a/modules/whoisquery/whoisquery_test.go b/modules/whoisquery/whoisquery_test.go
index 1f3c827bd..f96e372b4 100644
--- a/modules/whoisquery/whoisquery_test.go
+++ b/modules/whoisquery/whoisquery_test.go
@@ -4,12 +4,33 @@ package whoisquery
import (
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ require.NotNil(t, data, name)
+ }
+}
+
+func TestWhoisQuery_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WhoisQuery{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestWhoisQuery_Cleanup(t *testing.T) {
New().Cleanup()
}
@@ -17,7 +38,7 @@ func TestWhoisQuery_Cleanup(t *testing.T) {
func TestWhoisQuery_Charts(t *testing.T) {
whoisquery := New()
whoisquery.Source = "example.com"
- require.True(t, whoisquery.Init())
+ require.NoError(t, whoisquery.Init())
assert.NotNil(t, whoisquery.Charts())
}
@@ -45,9 +66,9 @@ func TestWhoisQuery_Init(t *testing.T) {
whoisquery.Config = test.config
if test.err {
- assert.False(t, whoisquery.Init())
+ assert.Error(t, whoisquery.Init())
} else {
- require.True(t, whoisquery.Init())
+ require.NoError(t, whoisquery.Init())
var typeOK bool
if test.providerType == net {
@@ -64,20 +85,20 @@ func TestWhoisQuery_Check(t *testing.T) {
whoisquery := New()
whoisquery.prov = &mockProvider{remTime: 12345.678}
- assert.True(t, whoisquery.Check())
+ assert.NoError(t, whoisquery.Check())
}
func TestWhoisQuery_Check_ReturnsFalseOnProviderError(t *testing.T) {
whoisquery := New()
whoisquery.prov = &mockProvider{err: true}
- assert.False(t, whoisquery.Check())
+ assert.Error(t, whoisquery.Check())
}
func TestWhoisQuery_Collect(t *testing.T) {
whoisquery := New()
whoisquery.Source = "example.com"
- require.True(t, whoisquery.Init())
+ require.NoError(t, whoisquery.Init())
whoisquery.prov = &mockProvider{remTime: 12345}
collected := whoisquery.Collect()
@@ -96,7 +117,7 @@ func TestWhoisQuery_Collect(t *testing.T) {
func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) {
whoisquery := New()
whoisquery.Source = "example.com"
- require.True(t, whoisquery.Init())
+ require.NoError(t, whoisquery.Init())
whoisquery.prov = &mockProvider{err: true}
assert.Nil(t, whoisquery.Collect())
diff --git a/modules/windows/config_schema.json b/modules/windows/config_schema.json
index 1668dd905..bcae9c8a1 100644
--- a/modules/windows/config_schema.json
+++ b/modules/windows/config_schema.json
@@ -1,59 +1,152 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/windows job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
- },
- "url": {
- "type": "string"
- },
- "timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "username": {
- "type": "string"
- },
- "password": {
- "type": "string"
- },
- "proxy_url": {
- "type": "string"
- },
- "proxy_username": {
- "type": "string"
- },
- "proxy_password": {
- "type": "string"
- },
- "headers": {
- "type": "object",
- "additionalProperties": {
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Windows collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 5
+ },
+ "url": {
+ "title": "URL",
+ "description": "The URL of the Windows exporter metrics endpoint.",
+ "type": "string",
+ "default": ""
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the HTTP request.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 5
+ },
+ "not_follow_redirects": {
+ "title": "Not follow redirects",
+ "description": "If set to true, the client will not follow HTTP redirects automatically.",
+ "type": "boolean"
+ },
+ "username": {
+ "title": "Username",
+ "description": "The username for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "password": {
+ "title": "Password",
+ "description": "The password for basic authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_url": {
+ "title": "Proxy URL",
+ "description": "The URL of the proxy server (if required).",
+ "type": "string"
+ },
+ "proxy_username": {
+ "title": "Proxy username",
+ "description": "The username for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "proxy_password": {
+ "title": "Proxy password",
+ "description": "The password for proxy authentication (if required).",
+ "type": "string",
+ "sensitive": true
+ },
+ "headers": {
+ "title": "Headers",
+ "description": "Additional HTTP headers to include in the request.",
+ "type": "object",
+ "additionalProperties": {
+ "type": "string"
+ }
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
"type": "string"
}
},
- "not_follow_redirects": {
- "type": "boolean"
+ "required": [
+ "url"
+ ]
+ },
+ "uiSchema": {
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "url",
+ "timeout",
+ "not_follow_redirects"
+ ]
+ },
+ {
+ "title": "Auth",
+ "fields": [
+ "username",
+ "password"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ },
+ {
+ "title": "Proxy",
+ "fields": [
+ "proxy_url",
+ "proxy_username",
+ "proxy_password"
+ ]
+ },
+ {
+ "title": "Headers",
+ "fields": [
+ "headers"
+ ]
+ }
+ ]
},
- "tls_ca": {
- "type": "string"
+ "uiOptions": {
+ "fullPage": true
},
- "tls_cert": {
- "type": "string"
+ "timeout": {
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_key": {
- "type": "string"
+ "password": {
+ "ui:widget": "password"
},
- "insecure_skip_verify": {
- "type": "boolean"
+ "proxy_password": {
+ "ui:widget": "password"
}
- },
- "required": [
- "name",
- "url"
- ]
+ }
}
diff --git a/modules/windows/init.go b/modules/windows/init.go
index 34cf83672..51c3c4266 100644
--- a/modules/windows/init.go
+++ b/modules/windows/init.go
@@ -4,7 +4,6 @@ package windows
import (
"errors"
- "net/http"
"github.com/netdata/go.d.plugin/pkg/prometheus"
"github.com/netdata/go.d.plugin/pkg/web"
@@ -17,10 +16,10 @@ func (w *Windows) validateConfig() error {
return nil
}
-func (w *Windows) initHTTPClient() (*http.Client, error) {
- return web.NewHTTPClient(w.Client)
-}
-
-func (w *Windows) initPrometheusClient(client *http.Client) (prometheus.Prometheus, error) {
+func (w *Windows) initPrometheusClient() (prometheus.Prometheus, error) {
+ client, err := web.NewHTTPClient(w.Client)
+ if err != nil {
+ return nil, err
+ }
return prometheus.New(client, w.Request), nil
}
diff --git a/modules/windows/testdata/config.json b/modules/windows/testdata/config.json
new file mode 100644
index 000000000..984c3ed6e
--- /dev/null
+++ b/modules/windows/testdata/config.json
@@ -0,0 +1,20 @@
+{
+ "update_every": 123,
+ "url": "ok",
+ "body": "ok",
+ "method": "ok",
+ "headers": {
+ "ok": "ok"
+ },
+ "username": "ok",
+ "password": "ok",
+ "proxy_url": "ok",
+ "proxy_username": "ok",
+ "proxy_password": "ok",
+ "timeout": 123.123,
+ "not_follow_redirects": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/windows/testdata/config.yaml b/modules/windows/testdata/config.yaml
new file mode 100644
index 000000000..8558b61cc
--- /dev/null
+++ b/modules/windows/testdata/config.yaml
@@ -0,0 +1,17 @@
+update_every: 123
+url: "ok"
+body: "ok"
+method: "ok"
+headers:
+ ok: "ok"
+username: "ok"
+password: "ok"
+proxy_url: "ok"
+proxy_username: "ok"
+proxy_password: "ok"
+timeout: 123.123
+not_follow_redirects: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/windows/windows.go b/modules/windows/windows.go
index e405887e0..d24f0c408 100644
--- a/modules/windows/windows.go
+++ b/modules/windows/windows.go
@@ -4,7 +4,7 @@ package windows
import (
_ "embed"
- "net/http"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -30,7 +30,7 @@ func New() *Windows {
Config: Config{
HTTP: web.HTTP{
Client: web.Client{
- Timeout: web.Duration{Duration: time.Second * 5},
+ Timeout: web.Duration(time.Second * 5),
},
},
},
@@ -68,22 +68,21 @@ func New() *Windows {
}
type Config struct {
- web.HTTP `yaml:",inline"`
+ web.HTTP `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
}
type (
Windows struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
charts *module.Charts
- doCheck bool
-
- httpClient *http.Client
- prom prometheus.Prometheus
+ prom prometheus.Prometheus
- cache cache
+ doCheck bool
+ cache cache
}
cache struct {
cores map[string]bool
@@ -116,31 +115,36 @@ type (
}
)
-func (w *Windows) Init() bool {
+func (w *Windows) Configuration() any {
+ return w.Config
+}
+
+func (w *Windows) Init() error {
if err := w.validateConfig(); err != nil {
w.Errorf("config validation: %v", err)
- return false
- }
-
- httpClient, err := w.initHTTPClient()
- if err != nil {
- w.Errorf("init HTTP client: %v", err)
- return false
+ return err
}
- w.httpClient = httpClient
- prom, err := w.initPrometheusClient(w.httpClient)
+ prom, err := w.initPrometheusClient()
if err != nil {
w.Errorf("init prometheus clients: %v", err)
- return false
+ return err
}
w.prom = prom
- return true
+ return nil
}
-func (w *Windows) Check() bool {
- return len(w.Collect()) > 0
+func (w *Windows) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (w *Windows) Charts() *module.Charts {
@@ -160,7 +164,7 @@ func (w *Windows) Collect() map[string]int64 {
}
func (w *Windows) Cleanup() {
- if w.httpClient != nil {
- w.httpClient.CloseIdleConnections()
+ if w.prom != nil && w.prom.HTTPClient() != nil {
+ w.prom.HTTPClient().CloseIdleConnections()
}
}
diff --git a/modules/windows/windows_test.go b/modules/windows/windows_test.go
index b98e40de6..ed897ab83 100644
--- a/modules/windows/windows_test.go
+++ b/modules/windows/windows_test.go
@@ -10,6 +10,7 @@ import (
"strings"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/web"
"github.com/stretchr/testify/assert"
@@ -17,17 +18,26 @@ import (
)
var (
- v0200Metrics, _ = os.ReadFile("testdata/v0.20.0/metrics.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataVer0200Metrics, _ = os.ReadFile("testdata/v0.20.0/metrics.txt")
)
-func Test_TestData(t *testing.T) {
+func Test_testDataIsValid(t *testing.T) {
for name, data := range map[string][]byte{
- "v0200Metrics": v0200Metrics,
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataVer0200Metrics": dataVer0200Metrics,
} {
- assert.NotNilf(t, data, name)
+ assert.NotNil(t, data, name)
}
}
+func TestWindows_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Windows{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestNew(t *testing.T) {
assert.IsType(t, (*Windows)(nil), New())
}
@@ -57,9 +67,9 @@ func TestWindows_Init(t *testing.T) {
win.Config = test.config
if test.wantFail {
- assert.False(t, win.Init())
+ assert.Error(t, win.Init())
} else {
- assert.True(t, win.Init())
+ assert.NoError(t, win.Init())
}
})
}
@@ -92,12 +102,12 @@ func TestWindows_Check(t *testing.T) {
win, cleanup := test.prepare()
defer cleanup()
- require.True(t, win.Init())
+ require.NoError(t, win.Init())
if test.wantFail {
- assert.False(t, win.Check())
+ assert.Error(t, win.Check())
} else {
- assert.True(t, win.Check())
+ assert.NoError(t, win.Check())
}
})
}
@@ -789,7 +799,7 @@ func TestWindows_Collect(t *testing.T) {
win, cleanup := test.prepare()
defer cleanup()
- require.True(t, win.Init())
+ require.NoError(t, win.Init())
mx := win.Collect()
@@ -1053,7 +1063,7 @@ func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, w *Windows, mx map[str
func prepareWindowsV0200() (win *Windows, cleanup func()) {
ts := httptest.NewServer(http.HandlerFunc(
func(w http.ResponseWriter, r *http.Request) {
- _, _ = w.Write(v0200Metrics)
+ _, _ = w.Write(dataVer0200Metrics)
}))
win = New()
diff --git a/modules/wireguard/config_schema.json b/modules/wireguard/config_schema.json
index c6d6c261f..1a51282c7 100644
--- a/modules/wireguard/config_schema.json
+++ b/modules/wireguard/config_schema.json
@@ -1,13 +1,21 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "title": "go.d/wireguard job configuration schema.",
- "type": "object",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "WireGuard collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ }
}
},
- "required": [
- "name"
- ]
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
+ }
+ }
}
diff --git a/modules/wireguard/testdata/config.json b/modules/wireguard/testdata/config.json
new file mode 100644
index 000000000..0e3f7c403
--- /dev/null
+++ b/modules/wireguard/testdata/config.json
@@ -0,0 +1,3 @@
+{
+ "update_every": 123
+}
diff --git a/modules/wireguard/testdata/config.yaml b/modules/wireguard/testdata/config.yaml
new file mode 100644
index 000000000..f21a3a7a0
--- /dev/null
+++ b/modules/wireguard/testdata/config.yaml
@@ -0,0 +1 @@
+update_every: 123
diff --git a/modules/wireguard/wireguard.go b/modules/wireguard/wireguard.go
index 6587dce3c..0936b024a 100644
--- a/modules/wireguard/wireguard.go
+++ b/modules/wireguard/wireguard.go
@@ -4,6 +4,7 @@ package wireguard
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/agent/module"
@@ -32,9 +33,14 @@ func New() *WireGuard {
}
}
+type Config struct {
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+}
+
type (
WireGuard struct {
module.Base
+ Config `yaml:",inline" json:""`
charts *module.Charts
@@ -43,9 +49,8 @@ type (
cleanupLastTime time.Time
cleanupEvery time.Duration
-
- devices map[string]bool
- peers map[string]bool
+ devices map[string]bool
+ peers map[string]bool
}
wgClient interface {
Devices() ([]*wgtypes.Device, error)
@@ -53,12 +58,24 @@ type (
}
)
-func (w *WireGuard) Init() bool {
- return true
+func (w *WireGuard) Configuration() any {
+ return w.Config
}
-func (w *WireGuard) Check() bool {
- return len(w.Collect()) > 0
+func (w *WireGuard) Init() error {
+ return nil
+}
+
+func (w *WireGuard) Check() error {
+ mx, err := w.collect()
+ if err != nil {
+ w.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (w *WireGuard) Charts() *module.Charts {
diff --git a/modules/wireguard/wireguard_test.go b/modules/wireguard/wireguard_test.go
index 5e6434dcc..bce7b9820 100644
--- a/modules/wireguard/wireguard_test.go
+++ b/modules/wireguard/wireguard_test.go
@@ -5,6 +5,7 @@ package wireguard
import (
"errors"
"fmt"
+ "os"
"strings"
"testing"
"time"
@@ -16,8 +17,26 @@ import (
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestWireGuard_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &WireGuard{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestWireGuard_Init(t *testing.T) {
- assert.True(t, New().Init())
+ assert.NoError(t, New().Init())
}
func TestWireGuard_Charts(t *testing.T) {
@@ -36,15 +55,15 @@ func TestWireGuard_Cleanup(t *testing.T) {
},
"after Init": {
wantClose: false,
- prepare: func(w *WireGuard) { w.Init() },
+ prepare: func(w *WireGuard) { _ = w.Init() },
},
"after Check": {
wantClose: true,
- prepare: func(w *WireGuard) { w.Init(); w.Check() },
+ prepare: func(w *WireGuard) { _ = w.Init(); _ = w.Check() },
},
"after Collect": {
wantClose: true,
- prepare: func(w *WireGuard) { w.Init(); w.Collect() },
+ prepare: func(w *WireGuard) { _ = w.Init(); _ = w.Collect() },
},
}
@@ -114,13 +133,13 @@ func TestWireGuard_Check(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
w := New()
- require.True(t, w.Init())
+ require.NoError(t, w.Init())
test.prepare(w)
if test.wantFail {
- assert.False(t, w.Check())
+ assert.Error(t, w.Check())
} else {
- assert.True(t, w.Check())
+ assert.NoError(t, w.Check())
}
})
}
@@ -411,7 +430,7 @@ func TestWireGuard_Collect(t *testing.T) {
for name, test := range tests {
t.Run(name, func(t *testing.T) {
w := New()
- require.True(t, w.Init())
+ require.NoError(t, w.Init())
m := &mockClient{}
w.client = m
diff --git a/modules/x509check/config_schema.json b/modules/x509check/config_schema.json
index 5194715ae..366c60435 100644
--- a/modules/x509check/config_schema.json
+++ b/modules/x509check/config_schema.json
@@ -1,54 +1,103 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "title": "go.d/x509check job configuration schema.",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "X509Check collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The data collection frequency in seconds.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "source": {
+ "title": "Certificate source",
+ "description": "The source of the certificate. Allowed schemes: https, tcp, tcp4, tcp6, udp, udp4, udp6, file.",
+ "type": "string"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout in seconds for the certificate retrieval.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "check_revocation_status": {
+ "title": "Revocation status check",
+ "description": "Whether to check the revocation status of the certificate.",
+ "type": "boolean"
+ },
+ "days_until_expiration_warning": {
+ "title": "Days until warning",
+ "description": "Number of days before the alarm status is set to warning.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 14
+ },
+ "days_until_expiration_critical": {
+ "title": "Days until critical",
+ "description": "Number of days before the alarm status is set to critical.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 7
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "source": {
- "type": "string"
+ "required": [
+ "source"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tlscfg": {
- "type": "object",
- "properties": {
- "tls_ca": {
- "type": "string"
- },
- "tls_cert": {
- "type": "string"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "source",
+ "timeout",
+ "check_revocation_status",
+ "days_until_expiration_warning",
+ "days_until_expiration_critical"
+ ]
},
- "tls_key": {
- "type": "string"
- },
- "tls_skip_verify": {
- "type": "boolean"
+ {
+ "title": "TLS",
+ "fields": [
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
}
- },
- "required": [
- "tls_ca",
- "tls_cert",
- "tls_key"
]
- },
- "days_until_expiration_warning": {
- "type": "integer"
- },
- "days_until_expiration_critical": {
- "type": "integer"
- },
- "check_revocation_status": {
- "type": "boolean"
}
- },
- "required": [
- "name",
- "source"
- ]
+ }
}
diff --git a/modules/x509check/provider.go b/modules/x509check/provider.go
index c5ac4d711..86d10176c 100644
--- a/modules/x509check/provider.go
+++ b/modules/x509check/provider.go
@@ -59,10 +59,10 @@ func newProvider(config Config) (provider, error) {
if sourceURL.Scheme == "https" {
sourceURL.Scheme = "tcp"
}
- return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil
+ return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil
case "smtp":
sourceURL.Scheme = "tcp"
- return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil
+ return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil
default:
return nil, fmt.Errorf("unsupported scheme '%s'", sourceURL)
}
diff --git a/modules/x509check/testdata/config.json b/modules/x509check/testdata/config.json
new file mode 100644
index 000000000..9bb2dade4
--- /dev/null
+++ b/modules/x509check/testdata/config.json
@@ -0,0 +1,12 @@
+{
+ "update_every": 123,
+ "source": "ok",
+ "timeout": 123.123,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true,
+ "days_until_expiration_warning": 123,
+ "days_until_expiration_critical": 123,
+ "check_revocation_status": true
+}
diff --git a/modules/x509check/testdata/config.yaml b/modules/x509check/testdata/config.yaml
new file mode 100644
index 000000000..e1f273f56
--- /dev/null
+++ b/modules/x509check/testdata/config.yaml
@@ -0,0 +1,10 @@
+update_every: 123
+source: "ok"
+timeout: 123.123
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
+days_until_expiration_warning: 123
+days_until_expiration_critical: 123
+check_revocation_status: yes
diff --git a/modules/x509check/x509check.go b/modules/x509check/x509check.go
index ed3a10b2f..44cab53ef 100644
--- a/modules/x509check/x509check.go
+++ b/modules/x509check/x509check.go
@@ -4,6 +4,7 @@ package x509check
import (
_ "embed"
+ "errors"
"time"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
@@ -30,7 +31,7 @@ func init() {
func New() *X509Check {
return &X509Check{
Config: Config{
- Timeout: web.Duration{Duration: time.Second * 2},
+ Timeout: web.Duration(time.Second * 2),
DaysUntilWarn: 14,
DaysUntilCritical: 7,
},
@@ -38,41 +39,56 @@ func New() *X509Check {
}
type Config struct {
- Source string
- Timeout web.Duration
- tlscfg.TLSConfig `yaml:",inline"`
- DaysUntilWarn int64 `yaml:"days_until_expiration_warning"`
- DaysUntilCritical int64 `yaml:"days_until_expiration_critical"`
- CheckRevocation bool `yaml:"check_revocation_status"`
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Source string `yaml:"source" json:"source"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ DaysUntilWarn int64 `yaml:"days_until_expiration_warning" json:"days_until_expiration_warning"`
+ DaysUntilCritical int64 `yaml:"days_until_expiration_critical" json:"days_until_expiration_critical"`
+ CheckRevocation bool `yaml:"check_revocation_status" json:"check_revocation_status"`
}
type X509Check struct {
module.Base
- Config `yaml:",inline"`
+ Config `yaml:",inline" json:""`
+
charts *module.Charts
- prov provider
+
+ prov provider
}
-func (x *X509Check) Init() bool {
+func (x *X509Check) Configuration() any {
+ return x.Config
+}
+
+func (x *X509Check) Init() error {
if err := x.validateConfig(); err != nil {
x.Errorf("config validation: %v", err)
- return false
+ return err
}
prov, err := x.initProvider()
if err != nil {
x.Errorf("certificate provider init: %v", err)
- return false
+ return err
}
x.prov = prov
x.charts = x.initCharts()
- return true
+ return nil
}
-func (x *X509Check) Check() bool {
- return len(x.Collect()) > 0
+func (x *X509Check) Check() error {
+ mx, err := x.collect()
+ if err != nil {
+ x.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
func (x *X509Check) Charts() *module.Charts {
diff --git a/modules/x509check/x509check_test.go b/modules/x509check/x509check_test.go
index 2c628af0a..0acde68ae 100644
--- a/modules/x509check/x509check_test.go
+++ b/modules/x509check/x509check_test.go
@@ -5,14 +5,34 @@ package x509check
import (
"crypto/x509"
"errors"
+ "os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
+var (
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+)
+
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ } {
+ assert.NotNil(t, data, name)
+ }
+}
+
+func TestX509Check_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &X509Check{}, dataConfigJSON, dataConfigYAML)
+}
+
func TestX509Check_Cleanup(t *testing.T) {
assert.NotPanics(t, New().Cleanup)
}
@@ -20,7 +40,7 @@ func TestX509Check_Cleanup(t *testing.T) {
func TestX509Check_Charts(t *testing.T) {
x509Check := New()
x509Check.Source = "https://example.com"
- require.True(t, x509Check.Init())
+ require.NoError(t, x509Check.Init())
assert.NotNil(t, x509Check.Charts())
}
@@ -70,9 +90,9 @@ func TestX509Check_Init(t *testing.T) {
x509Check.Config = test.config
if test.err {
- assert.False(t, x509Check.Init())
+ assert.Error(t, x509Check.Init())
} else {
- require.True(t, x509Check.Init())
+ require.NoError(t, x509Check.Init())
var typeOK bool
switch test.providerType {
@@ -94,20 +114,20 @@ func TestX509Check_Check(t *testing.T) {
x509Check := New()
x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}}
- assert.True(t, x509Check.Check())
+ assert.NoError(t, x509Check.Check())
}
func TestX509Check_Check_ReturnsFalseOnProviderError(t *testing.T) {
x509Check := New()
x509Check.prov = &mockProvider{err: true}
- assert.False(t, x509Check.Check())
+ assert.Error(t, x509Check.Check())
}
func TestX509Check_Collect(t *testing.T) {
x509Check := New()
x509Check.Source = "https://example.com"
- require.True(t, x509Check.Init())
+ require.NoError(t, x509Check.Init())
x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}}
collected := x509Check.Collect()
diff --git a/modules/zookeeper/collect.go b/modules/zookeeper/collect.go
index 97d6f3e6c..86491e1b1 100644
--- a/modules/zookeeper/collect.go
+++ b/modules/zookeeper/collect.go
@@ -14,10 +14,12 @@ func (z *Zookeeper) collect() (map[string]int64, error) {
func (z *Zookeeper) collectMntr() (map[string]int64, error) {
const command = "mntr"
+
lines, err := z.fetch("mntr")
if err != nil {
return nil, err
}
+
switch len(lines) {
case 0:
return nil, fmt.Errorf("'%s' command returned empty response", command)
@@ -27,6 +29,7 @@ func (z *Zookeeper) collectMntr() (map[string]int64, error) {
}
mx := make(map[string]int64)
+
for _, line := range lines {
parts := strings.Fields(line)
if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") {
@@ -56,6 +59,7 @@ func (z *Zookeeper) collectMntr() (map[string]int64, error) {
if len(mx) == 0 {
return nil, fmt.Errorf("'%s' command: failed to parse response", command)
}
+
return mx, nil
}
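
For context on what collectMntr parses: Zookeeper's mntr four-letter-word command returns one whitespace-separated "zk_<key> <value>" pair per line, and only numeric zk_-prefixed pairs survive into the metrics map. A self-contained sketch of that filtering step on illustrative input (the real collector additionally whitelists keys, which is what mntr_notinwhitelist.txt exercises):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Illustrative mntr response; real output comes from the ZK socket.
	lines := []string{
		"zk_avg_latency 0",
		"zk_outstanding_requests 0",
		"zk_server_state leader", // non-numeric value: skipped here
		"not_a_zk_line 42",       // no zk_ prefix: skipped
	}

	mx := make(map[string]int64)
	for _, line := range lines {
		parts := strings.Fields(line)
		if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") {
			continue
		}
		v, err := strconv.ParseInt(parts[1], 10, 64)
		if err != nil {
			continue
		}
		mx[strings.TrimPrefix(parts[0], "zk_")] = v
	}
	fmt.Println(mx) // map[avg_latency:0 outstanding_requests:0]
}
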
diff --git a/modules/zookeeper/config_schema.json b/modules/zookeeper/config_schema.json
index 259987aba..89a9ca2f1 100644
--- a/modules/zookeeper/config_schema.json
+++ b/modules/zookeeper/config_schema.json
@@ -1,38 +1,88 @@
{
- "$schema": "http://json-schema.org/draft-07/schema#",
- "type": "object",
- "title": "go.d/zookeeper job configuration schema.",
- "properties": {
- "name": {
- "type": "string"
+ "jsonSchema": {
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Zookeeper collector configuration.",
+ "type": "object",
+ "properties": {
+ "update_every": {
+ "title": "Update every",
+ "description": "The frequency, in seconds, at which data is collected from the Zookeeper server.",
+ "type": "integer",
+ "minimum": 1,
+ "default": 1
+ },
+ "address": {
+ "title": "Address",
+ "description": "The IP address and port where the Zookeeper server listens for connections.",
+ "type": "string",
+ "default": "127.0.0.1:2181"
+ },
+ "timeout": {
+ "title": "Timeout",
+ "description": "The timeout, in seconds, for connection, read, write, and SSL handshake operations.",
+ "type": "number",
+ "minimum": 0.5,
+ "default": 1
+ },
+ "use_tls": {
+ "title": "Use TLS",
+ "description": "Indicates whether TLS should be used for secure communication.",
+ "type": "boolean"
+ },
+ "tls_skip_verify": {
+ "title": "Skip TLS verification",
+ "description": "If set to true, TLS certificate verification will be skipped.",
+ "type": "boolean"
+ },
+ "tls_ca": {
+ "title": "TLS CA",
+ "description": "The path to the CA certificate file for TLS verification.",
+ "type": "string"
+ },
+ "tls_cert": {
+ "title": "TLS certificate",
+ "description": "The path to the client certificate file for TLS authentication.",
+ "type": "string"
+ },
+ "tls_key": {
+ "title": "TLS key",
+ "description": "The path to the client key file for TLS authentication.",
+ "type": "string"
+ }
},
- "address": {
- "type": "string"
+ "required": [
+ "address"
+ ]
+ },
+ "uiSchema": {
+ "uiOptions": {
+ "fullPage": true
},
"timeout": {
- "type": [
- "string",
- "integer"
- ]
- },
- "use_tls": {
- "type": "boolean"
- },
- "tls_ca": {
- "type": "string"
+ "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)."
},
- "tls_cert": {
- "type": "string"
- },
- "tls_key": {
- "type": "string"
- },
- "insecure_skip_verify": {
- "type": "boolean"
+ "ui:flavour": "tabs",
+ "ui:options": {
+ "tabs": [
+ {
+ "title": "Base",
+ "fields": [
+ "update_every",
+ "address",
+ "timeout"
+ ]
+ },
+ {
+ "title": "TLS",
+ "fields": [
+ "use_tls",
+ "tls_skip_verify",
+ "tls_ca",
+ "tls_cert",
+ "tls_key"
+ ]
+ }
+ ]
}
- },
- "required": [
- "name",
- "address"
- ]
+ }
}
diff --git a/modules/zookeeper/fetcher.go b/modules/zookeeper/fetcher.go
index 7c3aae0ea..cd9eed90d 100644
--- a/modules/zookeeper/fetcher.go
+++ b/modules/zookeeper/fetcher.go
@@ -39,9 +39,12 @@ func (c *zookeeperFetcher) fetch(command string) (rows []string, err error) {
if err != nil {
return nil, err
}
+
return rows, nil
}
+func (c *zookeeperFetcher) disconnect() {}
+
func isZKLine(line []byte) bool {
return bytes.HasPrefix(line, []byte("zk_"))
}
diff --git a/modules/zookeeper/init.go b/modules/zookeeper/init.go
new file mode 100644
index 000000000..d865a0949
--- /dev/null
+++ b/modules/zookeeper/init.go
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+package zookeeper
+
+import (
+ "crypto/tls"
+ "errors"
+ "fmt"
+
+ "github.com/netdata/go.d.plugin/pkg/socket"
+ "github.com/netdata/go.d.plugin/pkg/tlscfg"
+)
+
+func (z *Zookeeper) verifyConfig() error {
+ if z.Address == "" {
+ return errors.New("address not set")
+ }
+ return nil
+}
+
+func (z *Zookeeper) initZookeeperFetcher() (fetcher, error) {
+ var tlsConf *tls.Config
+ var err error
+
+ if z.UseTLS {
+ tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig)
+ if err != nil {
+ return nil, fmt.Errorf("creating tls config: %v", err)
+ }
+ }
+
+ sock := socket.New(socket.Config{
+ Address: z.Address,
+ ConnectTimeout: z.Timeout.Duration(),
+ ReadTimeout: z.Timeout.Duration(),
+ WriteTimeout: z.Timeout.Duration(),
+ TLSConf: tlsConf,
+ })
+
+ return &zookeeperFetcher{Client: sock}, nil
+}
diff --git a/modules/zookeeper/testdata/config.json b/modules/zookeeper/testdata/config.json
new file mode 100644
index 000000000..0cf6c4727
--- /dev/null
+++ b/modules/zookeeper/testdata/config.json
@@ -0,0 +1,10 @@
+{
+ "update_every": 123,
+ "address": "ok",
+ "timeout": 123.123,
+ "use_tls": true,
+ "tls_ca": "ok",
+ "tls_cert": "ok",
+ "tls_key": "ok",
+ "tls_skip_verify": true
+}
diff --git a/modules/zookeeper/testdata/config.yaml b/modules/zookeeper/testdata/config.yaml
new file mode 100644
index 000000000..54456cc80
--- /dev/null
+++ b/modules/zookeeper/testdata/config.yaml
@@ -0,0 +1,8 @@
+update_every: 123
+address: "ok"
+timeout: 123.123
+use_tls: yes
+tls_ca: "ok"
+tls_cert: "ok"
+tls_key: "ok"
+tls_skip_verify: yes
diff --git a/modules/zookeeper/zookeeper.go b/modules/zookeeper/zookeeper.go
index 29ab1f858..fd96b4ca8 100644
--- a/modules/zookeeper/zookeeper.go
+++ b/modules/zookeeper/zookeeper.go
@@ -3,16 +3,13 @@
package zookeeper
import (
- "crypto/tls"
_ "embed"
- "fmt"
+ "errors"
"time"
- "github.com/netdata/go.d.plugin/pkg/socket"
+ "github.com/netdata/go.d.plugin/agent/module"
"github.com/netdata/go.d.plugin/pkg/tlscfg"
"github.com/netdata/go.d.plugin/pkg/web"
-
- "github.com/netdata/go.d.plugin/agent/module"
)
//go:embed "config_schema.json"
@@ -25,80 +22,71 @@ func init() {
})
}
-// Config is the Zookeeper module configuration.
-type Config struct {
- Address string
- Timeout web.Duration `yaml:"timeout"`
- UseTLS bool `yaml:"use_tls"`
- tlscfg.TLSConfig `yaml:",inline"`
-}
-
-// New creates Zookeeper with default values.
func New() *Zookeeper {
- config := Config{
- Address: "127.0.0.1:2181",
- Timeout: web.Duration{Duration: time.Second},
- UseTLS: false,
- }
- return &Zookeeper{Config: config}
+ return &Zookeeper{
+ Config: Config{
+ Address: "127.0.0.1:2181",
+ Timeout: web.Duration(time.Second),
+ UseTLS: false,
+ }}
}
-type fetcher interface {
- fetch(command string) ([]string, error)
-}
-
-// Zookeeper Zookeeper module.
-type Zookeeper struct {
- module.Base
- fetcher
- Config `yaml:",inline"`
+type Config struct {
+ tlscfg.TLSConfig `yaml:",inline" json:""`
+ UpdateEvery int `yaml:"update_every" json:"update_every"`
+ Address string `yaml:"address" json:"address"`
+ Timeout web.Duration `yaml:"timeout" json:"timeout"`
+ UseTLS bool `yaml:"use_tls" json:"use_tls"`
}
-// Cleanup makes cleanup.
-func (Zookeeper) Cleanup() {}
+type (
+ Zookeeper struct {
+ module.Base
+ Config `yaml:",inline" json:""`
-func (z *Zookeeper) createZookeeperFetcher() (err error) {
- var tlsConf *tls.Config
- if z.UseTLS {
- tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig)
- if err != nil {
- return fmt.Errorf("error on creating tls config : %v", err)
- }
+ fetcher
}
+ fetcher interface {
+ fetch(command string) ([]string, error)
+ }
+)
- sock := socket.New(socket.Config{
- Address: z.Address,
- ConnectTimeout: z.Timeout.Duration,
- ReadTimeout: z.Timeout.Duration,
- WriteTimeout: z.Timeout.Duration,
- TLSConf: tlsConf,
- })
- z.fetcher = &zookeeperFetcher{Client: sock}
- return nil
+func (z *Zookeeper) Configuration() any {
+ return z.Config
}
-// Init makes initialization.
-func (z *Zookeeper) Init() bool {
- err := z.createZookeeperFetcher()
+func (z *Zookeeper) Init() error {
+ if err := z.verifyConfig(); err != nil {
+ z.Error(err)
+ return err
+ }
+
+ f, err := z.initZookeeperFetcher()
if err != nil {
z.Error(err)
- return false
+ return err
}
+ z.fetcher = f
- return true
+ return nil
}
-// Check makes check.
-func (z *Zookeeper) Check() bool {
- return len(z.Collect()) > 0
+func (z *Zookeeper) Check() error {
+ mx, err := z.collect()
+ if err != nil {
+ z.Error(err)
+ return err
+ }
+ if len(mx) == 0 {
+ return errors.New("no metrics collected")
+ }
+ return nil
}
-// Charts creates Charts.
-func (Zookeeper) Charts() *Charts {
+func (z *Zookeeper) Charts() *Charts {
return charts.Copy()
}
-// Collect collects metrics.
func (z *Zookeeper) Collect() map[string]int64 {
mx, err := z.collect()
if err != nil {
@@ -110,3 +98,5 @@ func (z *Zookeeper) Collect() map[string]int64 {
}
return mx
}
+
+func (z *Zookeeper) Cleanup() {}
diff --git a/modules/zookeeper/zookeeper_test.go b/modules/zookeeper/zookeeper_test.go
index 13f3632c2..496afdab5 100644
--- a/modules/zookeeper/zookeeper_test.go
+++ b/modules/zookeeper/zookeeper_test.go
@@ -9,30 +9,39 @@ import (
"os"
"testing"
+ "github.com/netdata/go.d.plugin/agent/module"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var (
- testMntrData, _ = os.ReadFile("testdata/mntr.txt")
- testMntrNotInWhiteListData, _ = os.ReadFile("testdata/mntr_notinwhitelist.txt")
+ dataConfigJSON, _ = os.ReadFile("testdata/config.json")
+ dataConfigYAML, _ = os.ReadFile("testdata/config.yaml")
+
+ dataMntrMetrics, _ = os.ReadFile("testdata/mntr.txt")
+ dataMntrNotInWhiteListResponse, _ = os.ReadFile("testdata/mntr_notinwhitelist.txt")
)
-func Test_testDataLoad(t *testing.T) {
- assert.NotNil(t, testMntrData)
- assert.NotNil(t, testMntrNotInWhiteListData)
+func Test_testDataIsValid(t *testing.T) {
+ for name, data := range map[string][]byte{
+ "dataConfigJSON": dataConfigJSON,
+ "dataConfigYAML": dataConfigYAML,
+ "dataMntrMetrics": dataMntrMetrics,
+ "dataMntrNotInWhiteListResponse": dataMntrNotInWhiteListResponse,
+ } {
+ assert.NotNil(t, data, name)
+ }
}
-func TestNew(t *testing.T) {
- job := New()
-
- assert.IsType(t, (*Zookeeper)(nil), job)
+func TestZookeeper_ConfigurationSerialize(t *testing.T) {
+ module.TestConfigurationSerialize(t, &Zookeeper{}, dataConfigJSON, dataConfigYAML)
}
func TestZookeeper_Init(t *testing.T) {
job := New()
- assert.True(t, job.Init())
+ assert.NoError(t, job.Init())
assert.NotNil(t, job.fetcher)
}
@@ -41,23 +50,23 @@ func TestZookeeper_InitErrorOnCreatingTLSConfig(t *testing.T) {
job.UseTLS = true
job.TLSConfig.TLSCA = "testdata/tls"
- assert.False(t, job.Init())
+ assert.Error(t, job.Init())
}
func TestZookeeper_Check(t *testing.T) {
job := New()
- require.True(t, job.Init())
- job.fetcher = &mockZookeeperFetcher{data: testMntrData}
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrMetrics}
- assert.True(t, job.Check())
+ assert.NoError(t, job.Check())
}
func TestZookeeper_CheckErrorOnFetch(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.fetcher = &mockZookeeperFetcher{err: true}
- assert.False(t, job.Check())
+ assert.Error(t, job.Check())
}
func TestZookeeper_Charts(t *testing.T) {
@@ -70,8 +79,8 @@ func TestZookeeper_Cleanup(t *testing.T) {
func TestZookeeper_Collect(t *testing.T) {
job := New()
- require.True(t, job.Init())
- job.fetcher = &mockZookeeperFetcher{data: testMntrData}
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrMetrics}
expected := map[string]int64{
"approximate_data_size": 44,
@@ -98,15 +107,15 @@ func TestZookeeper_Collect(t *testing.T) {
func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) {
job := New()
- require.True(t, job.Init())
- job.fetcher = &mockZookeeperFetcher{data: testMntrNotInWhiteListData}
+ require.NoError(t, job.Init())
+ job.fetcher = &mockZookeeperFetcher{data: dataMntrNotInWhiteListResponse}
assert.Nil(t, job.Collect())
}
func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.fetcher = &mockZookeeperFetcher{}
assert.Nil(t, job.Collect())
@@ -114,7 +123,7 @@ func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) {
func TestZookeeper_CollectMntrInvalidData(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.fetcher = &mockZookeeperFetcher{data: []byte("hello \nand good buy\n")}
assert.Nil(t, job.Collect())
@@ -122,7 +131,7 @@ func TestZookeeper_CollectMntrInvalidData(t *testing.T) {
func TestZookeeper_CollectMntrReceiveError(t *testing.T) {
job := New()
- require.True(t, job.Init())
+ require.NoError(t, job.Init())
job.fetcher = &mockZookeeperFetcher{err: true}
assert.Nil(t, job.Collect())
diff --git a/pkg/logs/csv.go b/pkg/logs/csv.go
index 3a7610a70..0b7d90009 100644
--- a/pkg/logs/csv.go
+++ b/pkg/logs/csv.go
@@ -14,11 +14,11 @@ import (
type (
CSVConfig struct {
- FieldsPerRecord int `yaml:"fields_per_record"`
- Delimiter string `yaml:"delimiter"`
- TrimLeadingSpace bool `yaml:"trim_leading_space"`
- Format string `yaml:"format"`
- CheckField func(string) (string, int, bool) `yaml:"-"`
+ FieldsPerRecord int `yaml:"fields_per_record" json:"fields_per_record"`
+ Delimiter string `yaml:"delimiter" json:"delimiter"`
+ TrimLeadingSpace bool `yaml:"trim_leading_space" json:"trim_leading_space"`
+ Format string `yaml:"format" json:"format"`
+ CheckField func(string) (string, int, bool) `yaml:"-" json:"-"`
}
CSVParser struct {
diff --git a/pkg/logs/json.go b/pkg/logs/json.go
index cfd6c83e7..ceb32e272 100644
--- a/pkg/logs/json.go
+++ b/pkg/logs/json.go
@@ -12,7 +12,7 @@ import (
)
type JSONConfig struct {
- Mapping map[string]string `yaml:"mapping"`
+ Mapping map[string]string `yaml:"mapping" json:"mapping"`
}
type JSONParser struct {
diff --git a/pkg/logs/ltsv.go b/pkg/logs/ltsv.go
index 558f9e076..b7fbceb14 100644
--- a/pkg/logs/ltsv.go
+++ b/pkg/logs/ltsv.go
@@ -15,9 +15,9 @@ import (
type (
LTSVConfig struct {
- FieldDelimiter string `yaml:"field_delimiter"`
- ValueDelimiter string `yaml:"value_delimiter"`
- Mapping map[string]string `yaml:"mapping"`
+ FieldDelimiter string `yaml:"field_delimiter" json:"field_delimiter"`
+ ValueDelimiter string `yaml:"value_delimiter" json:"value_delimiter"`
+ Mapping map[string]string `yaml:"mapping" json:"mapping"`
}
LTSVParser struct {
diff --git a/pkg/logs/parser.go b/pkg/logs/parser.go
index f1807283a..d83b4309d 100644
--- a/pkg/logs/parser.go
+++ b/pkg/logs/parser.go
@@ -40,11 +40,11 @@ const (
)
type ParserConfig struct {
- LogType string `yaml:"log_type"`
- CSV CSVConfig `yaml:"csv_config"`
- LTSV LTSVConfig `yaml:"ltsv_config"`
- RegExp RegExpConfig `yaml:"regexp_config"`
- JSON JSONConfig `yaml:"json_config"`
+ LogType string `yaml:"log_type" json:"log_type"`
+ CSV CSVConfig `yaml:"csv_config" json:"csv_config"`
+ LTSV LTSVConfig `yaml:"ltsv_config" json:"ltsv_config"`
+ RegExp RegExpConfig `yaml:"regexp_config" json:"regexp_config"`
+ JSON JSONConfig `yaml:"json_config" json:"json_config"`
}
func NewParser(config ParserConfig, in io.Reader) (Parser, error) {
diff --git a/pkg/logs/regexp.go b/pkg/logs/regexp.go
index 84b725fd9..e0dee1d02 100644
--- a/pkg/logs/regexp.go
+++ b/pkg/logs/regexp.go
@@ -12,7 +12,7 @@ import (
type (
RegExpConfig struct {
- Pattern string `yaml:"pattern"`
+ Pattern string `yaml:"pattern" json:"pattern"`
}
RegExpParser struct {
diff --git a/pkg/matcher/glob.go b/pkg/matcher/glob.go
index f8cd5b072..726c94c45 100644
--- a/pkg/matcher/glob.go
+++ b/pkg/matcher/glob.go
@@ -3,11 +3,10 @@
package matcher
import (
+ "errors"
"path/filepath"
"regexp"
"unicode/utf8"
-
- "errors"
)
// globMatcher implements Matcher, it uses filepath.MatchString to match.
diff --git a/pkg/multipath/multipath.go b/pkg/multipath/multipath.go
index 041de081b..a09f3cec5 100644
--- a/pkg/multipath/multipath.go
+++ b/pkg/multipath/multipath.go
@@ -3,6 +3,7 @@
package multipath
import (
+ "errors"
"fmt"
"os"
"path/filepath"
@@ -17,11 +18,8 @@ func (e ErrNotFound) Error() string { return e.msg }
// IsNotFound returns a boolean indicating whether the error is ErrNotFound or not.
func IsNotFound(err error) bool {
- switch err.(type) {
- case ErrNotFound:
- return true
- }
- return false
+ var errNotFound ErrNotFound
+ return errors.As(err, &errNotFound)
}
// MultiPath multi-paths
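
The IsNotFound rewrite is more than style: the old type switch matched only a bare ErrNotFound, while errors.As also unwraps errors created with fmt.Errorf("...: %w", err). A runnable demonstration of the difference:

package main

import (
	"errors"
	"fmt"
)

type ErrNotFound struct{ msg string }

func (e ErrNotFound) Error() string { return e.msg }

// IsNotFound matches ErrNotFound anywhere in the wrap chain.
func IsNotFound(err error) bool {
	var errNotFound ErrNotFound
	return errors.As(err, &errNotFound)
}

func main() {
	base := ErrNotFound{msg: "conf.yml not found"}
	wrapped := fmt.Errorf("loading config: %w", base)

	fmt.Println(IsNotFound(base))    // true
	fmt.Println(IsNotFound(wrapped)) // true; the old type switch said false
}
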
diff --git a/pkg/prometheus/selector/expr.go b/pkg/prometheus/selector/expr.go
index 8d09db206..7593513a5 100644
--- a/pkg/prometheus/selector/expr.go
+++ b/pkg/prometheus/selector/expr.go
@@ -5,8 +5,8 @@ package selector
import "fmt"
type Expr struct {
- Allow []string `yaml:"allow"`
- Deny []string `yaml:"deny"`
+ Allow []string `yaml:"allow" json:"allow"`
+ Deny []string `yaml:"deny" json:"deny"`
}
func (e Expr) Empty() bool {
diff --git a/pkg/tlscfg/config.go b/pkg/tlscfg/config.go
index 26051e486..60e152e0f 100644
--- a/pkg/tlscfg/config.go
+++ b/pkg/tlscfg/config.go
@@ -12,16 +12,16 @@ import (
// TLSConfig represents the standard client TLS configuration.
type TLSConfig struct {
// TLSCA specifies the certificate authority to use when verifying server certificates.
- TLSCA string `yaml:"tls_ca"`
+ TLSCA string `yaml:"tls_ca" json:"tls_ca"`
// TLSCert specifies tls certificate file.
- TLSCert string `yaml:"tls_cert"`
+ TLSCert string `yaml:"tls_cert" json:"tls_cert"`
// TLSKey specifies tls key file.
- TLSKey string `yaml:"tls_key"`
+ TLSKey string `yaml:"tls_key" json:"tls_key"`
// InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name.
- InsecureSkipVerify bool `yaml:"tls_skip_verify"`
+ InsecureSkipVerify bool `yaml:"tls_skip_verify" json:"tls_skip_verify"`
}
// NewTLSConfig creates a tls.Config, may be nil without an error if TLS is not configured.
diff --git a/pkg/web/client.go b/pkg/web/client.go
index ae3ecd462..32f18271e 100644
--- a/pkg/web/client.go
+++ b/pkg/web/client.go
@@ -21,18 +21,18 @@ var ErrRedirectAttempted = errors.New("redirect")
type Client struct {
// Timeout specifies a time limit for requests made by this Client.
// Default (zero value) is no timeout. Must be set before http.Client creation.
- Timeout Duration `yaml:"timeout"`
+ Timeout Duration `yaml:"timeout" json:"timeout"`
// NotFollowRedirect specifies the policy for handling redirects.
// Default (zero value) is std http package default policy (stop after 10 consecutive requests).
- NotFollowRedirect bool `yaml:"not_follow_redirects"`
+ NotFollowRedirect bool `yaml:"not_follow_redirects" json:"not_follow_redirects"`
// ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables
// HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL.
- ProxyURL string `yaml:"proxy_url"`
+ ProxyURL string `yaml:"proxy_url" json:"proxy_url"`
// TLSConfig specifies the TLS configuration.
- tlscfg.TLSConfig `yaml:",inline"`
+ tlscfg.TLSConfig `yaml:",inline" json:",inline"`
}
// NewHTTPClient returns a new *http.Client given a Client configuration and an error if any.
@@ -48,17 +48,17 @@ func NewHTTPClient(cfg Client) (*http.Client, error) {
}
}
- d := &net.Dialer{Timeout: cfg.Timeout.Duration}
+ d := &net.Dialer{Timeout: cfg.Timeout.Duration()}
transport := &http.Transport{
Proxy: proxyFunc(cfg.ProxyURL),
TLSClientConfig: tlsConfig,
DialContext: d.DialContext,
- TLSHandshakeTimeout: cfg.Timeout.Duration,
+ TLSHandshakeTimeout: cfg.Timeout.Duration(),
}
return &http.Client{
- Timeout: cfg.Timeout.Duration,
+ Timeout: cfg.Timeout.Duration(),
Transport: transport,
CheckRedirect: redirectFunc(cfg.NotFollowRedirect),
}, nil
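Because `Duration` is now a named `time.Duration` rather than a struct (see duration.go below), the former field access `cfg.Timeout.Duration` becomes the method call `cfg.Timeout.Duration()`, and callers build a value with a plain conversion, as the updated test below shows. For reference, a hypothetical caller after this change:

```go
package main

import (
	"log"
	"time"

	"github.com/netdata/go.d.plugin/pkg/web"
)

func main() {
	// Was: Timeout: web.Duration{Duration: 5 * time.Second}
	client, err := web.NewHTTPClient(web.Client{
		Timeout:           web.Duration(5 * time.Second),
		NotFollowRedirect: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // use as a regular *http.Client
}
```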
diff --git a/pkg/web/client_test.go b/pkg/web/client_test.go
index e11d6ce47..ead1486c3 100644
--- a/pkg/web/client_test.go
+++ b/pkg/web/client_test.go
@@ -12,7 +12,7 @@ import (
func TestNewHTTPClient(t *testing.T) {
client, _ := NewHTTPClient(Client{
- Timeout: Duration{Duration: time.Second * 5},
+ Timeout: Duration(time.Second * 5),
NotFollowRedirect: true,
ProxyURL: "http://127.0.0.1:3128",
})
diff --git a/pkg/web/duration.go b/pkg/web/duration.go
index ced991f91..85d5ef650 100644
--- a/pkg/web/duration.go
+++ b/pkg/web/duration.go
@@ -3,17 +3,22 @@
package web
import (
+ "encoding/json"
"fmt"
"strconv"
"time"
)
-// Duration is a time.Duration wrapper.
-type Duration struct {
- Duration time.Duration
+type Duration time.Duration
+
+func (d Duration) Duration() time.Duration {
+ return time.Duration(d)
+}
+
+func (d Duration) String() string {
+ return d.Duration().String()
}
-// UnmarshalYAML implements yaml.Unmarshaler.
func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
var s string
@@ -22,18 +27,46 @@ func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
}
if v, err := time.ParseDuration(s); err == nil {
- d.Duration = v
+ *d = Duration(v)
return nil
}
if v, err := strconv.ParseInt(s, 10, 64); err == nil {
- d.Duration = time.Duration(v) * time.Second
+ *d = Duration(time.Duration(v) * time.Second)
return nil
}
if v, err := strconv.ParseFloat(s, 64); err == nil {
- d.Duration = time.Duration(v) * time.Second
+ *d = Duration(v * float64(time.Second))
return nil
}
+
return fmt.Errorf("unparsable duration format '%s'", s)
}
-func (d Duration) String() string { return d.Duration.String() }
+func (d Duration) MarshalYAML() (any, error) {
+ seconds := float64(d) / float64(time.Second)
+ return seconds, nil
+}
+
+func (d *Duration) UnmarshalJSON(b []byte) error {
+ // JSON string values arrive quoted; bare numbers do not.
+ s := strings.Trim(string(b), "\"")
+
+ if v, err := time.ParseDuration(s); err == nil {
+ *d = Duration(v)
+ return nil
+ }
+ if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+ *d = Duration(time.Duration(v) * time.Second)
+ return nil
+ }
+ if v, err := strconv.ParseFloat(s, 64); err == nil {
+ *d = Duration(v * float64(time.Second))
+ return nil
+ }
+
+ return fmt.Errorf("unparsable duration format '%s'", s)
+}
+
+func (d Duration) MarshalJSON() ([]byte, error) {
+ seconds := float64(d) / float64(time.Second)
+ return json.Marshal(seconds)
+}
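Both marshalers emit the duration as a bare number of seconds, while the unmarshalers accept Go duration strings (`"300ms"`), integers, and floats, so every accepted input round-trips to seconds. A runnable illustration with a local stand-in for the type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration mirrors web.Duration's MarshalJSON: seconds as a float.
type Duration time.Duration

func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(float64(d) / float64(time.Second))
}

func main() {
	b, _ := json.Marshal(Duration(1500 * time.Millisecond))
	fmt.Println(string(b)) // 1.5
}
```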
diff --git a/pkg/web/duration_test.go b/pkg/web/duration_test.go
index 01ee19dd2..b45063f13 100644
--- a/pkg/web/duration_test.go
+++ b/pkg/web/duration_test.go
@@ -3,22 +3,112 @@
package web
import (
+ "encoding/json"
+ "fmt"
+ "strings"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
"gopkg.in/yaml.v2"
)
+func TestDuration_MarshalYAML(t *testing.T) {
+ tests := map[string]struct {
+ d Duration
+ want string
+ }{
+ "1 second": {d: Duration(time.Second), want: "1"},
+ "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ bs, err := yaml.Marshal(&test.d)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+ })
+ }
+}
+
+func TestDuration_MarshalJSON(t *testing.T) {
+ tests := map[string]struct {
+ d Duration
+ want string
+ }{
+ "1 second": {d: Duration(time.Second), want: "1"},
+ "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+ }
+
+ for name, test := range tests {
+ t.Run(name, func(t *testing.T) {
+ bs, err := json.Marshal(&test.d)
+ require.NoError(t, err)
+
+ assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+ })
+ }
+}
+
func TestDuration_UnmarshalYAML(t *testing.T) {
- var d Duration
- values := [][]byte{
- []byte("100ms"), // duration
- []byte("3s300ms"), // duration
- []byte("3"), // int
- []byte("3.3"), // float
+ tests := map[string]struct {
+ input any
+ }{
+ "duration": {input: "300ms"},
+ "string int": {input: "1"},
+ "string float": {input: "1.1"},
+ "int": {input: 2},
+ "float": {input: 2.2},
}
- for _, v := range values {
- assert.NoError(t, yaml.Unmarshal(v, &d))
+ var zero Duration
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%v)", name, test.input)
+ t.Run(name, func(t *testing.T) {
+ data, err := yaml.Marshal(test.input)
+ require.NoError(t, err)
+
+ var d Duration
+ require.NoError(t, yaml.Unmarshal(data, &d))
+ assert.NotEqual(t, zero.String(), d.String())
+ })
+ }
+}
+
+func TestDuration_UnmarshalJSON(t *testing.T) {
+ tests := map[string]struct {
+ input any
+ }{
+ "duration": {input: "300ms"},
+ "string int": {input: "1"},
+ "string float": {input: "1.1"},
+ "int": {input: 2},
+ "float": {input: 2.2},
+ }
+
+ var zero Duration
+
+ type duration struct {
+ D Duration `json:"d"`
+ }
+ type input struct {
+ D any `json:"d"`
+ }
+
+ for name, test := range tests {
+ name = fmt.Sprintf("%s (%v)", name, test.input)
+ t.Run(name, func(t *testing.T) {
+ input := input{D: test.input}
+ data, err := json.Marshal(input)
+ require.NoError(t, err)
+
+ var d duration
+ require.NoError(t, json.Unmarshal(data, &d))
+ assert.NotEqual(t, zero.String(), d.D.String())
+ })
}
}
diff --git a/pkg/web/request.go b/pkg/web/request.go
index 5740da6d1..3db08f734 100644
--- a/pkg/web/request.go
+++ b/pkg/web/request.go
@@ -14,30 +14,30 @@ import (
// Supported configuration file formats: YAML.
type Request struct {
// URL specifies the URL to access.
- URL string `yaml:"url"`
+ URL string `yaml:"url" json:"url"`
// Body specifies the HTTP request body to be sent by the client.
- Body string `yaml:"body"`
+ Body string `yaml:"body" json:"body"`
// Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET.
- Method string `yaml:"method"`
+ Method string `yaml:"method" json:"method"`
// Headers specifies the HTTP request header fields to be sent by the client.
- Headers map[string]string `yaml:"headers"`
+ Headers map[string]string `yaml:"headers" json:"headers"`
// Username specifies the username for basic HTTP authentication.
- Username string `yaml:"username"`
+ Username string `yaml:"username" json:"username"`
// Password specifies the password for basic HTTP authentication.
- Password string `yaml:"password"`
+ Password string `yaml:"password" json:"password"`
// ProxyUsername specifies the username for basic HTTP authentication.
// It is used to authenticate a user agent to a proxy server.
- ProxyUsername string `yaml:"proxy_username"`
+ ProxyUsername string `yaml:"proxy_username" json:"proxy_username"`
// ProxyPassword specifies the password for basic HTTP authentication.
// It is used to authenticate a user agent to a proxy server.
- ProxyPassword string `yaml:"proxy_password"`
+ ProxyPassword string `yaml:"proxy_password" json:"proxy_password"`
}
// Copy makes a full copy of the Request.
diff --git a/pkg/web/web.go b/pkg/web/web.go
index e2a7098ba..07cef4839 100644
--- a/pkg/web/web.go
+++ b/pkg/web/web.go
@@ -6,6 +6,6 @@ package web
// This structure intended to be part of the module configuration.
// Supported configuration file formats: YAML.
type HTTP struct {
- Request `yaml:",inline"`
- Client `yaml:",inline"`
+ Request `yaml:",inline" json:",inline"`
+ Client `yaml:",inline" json:",inline"`
}