From 292bf5a9e28f71fb96a3c623db1553be307b3a56 Mon Sep 17 00:00:00 2001 From: ilyam8 Date: Tue, 16 Jan 2024 17:20:04 +0200 Subject: [PATCH] dyncfgv2 --- README.md | 3 - agent/agent.go | 55 +- agent/agent_test.go | 8 +- agent/confgroup/cache.go | 93 -- agent/confgroup/cache_test.go | 134 -- agent/confgroup/config.go | 171 +++ agent/confgroup/group.go | 124 +- agent/confgroup/group_test.go | 2 +- agent/discovery/dummy/discovery.go | 8 +- agent/discovery/dyncfg/config.go | 35 - agent/discovery/dyncfg/dyncfg.go | 256 ---- agent/discovery/dyncfg/dyncfg_test.go | 239 --- agent/discovery/dyncfg/ext.go | 79 - agent/discovery/file/parse.go | 15 +- agent/discovery/file/read.go | 11 +- agent/discovery/file/watch.go | 5 +- agent/discovery/manager.go | 10 +- agent/discovery/sd/hostsocket/net.go | 20 +- agent/discovery/sd/pipeline/config.go | 6 +- agent/discovery/sd/pipeline/funcmap.go | 46 +- agent/discovery/sd/pipeline/funcmap_test.go | 62 +- agent/discovery/sd/pipeline/pipeline.go | 7 +- agent/discovery/sd/pipeline/pipeline_test.go | 9 + agent/discovery/sd/pipeline/qq.yaml | 34 - agent/functions/function.go | 41 +- agent/functions/manager.go | 40 +- agent/jobmgr/cache.go | 175 ++- agent/jobmgr/di.go | 27 +- agent/jobmgr/dyncfg.go | 635 ++++++++ agent/jobmgr/manager.go | 455 +++--- agent/jobmgr/manager_test.go | 1313 ++++++++++++++++- agent/jobmgr/noop.go | 19 +- agent/jobmgr/run.go | 73 - agent/jobmgr/sim_test.go | 131 ++ agent/module/job.go | 77 +- agent/module/job_test.go | 69 +- agent/module/mock.go | 48 +- agent/module/mock_test.go | 12 +- agent/module/module.go | 11 +- agent/module/registry.go | 5 + agent/netdataapi/api.go | 68 +- agent/netdataapi/api_test.go | 97 +- config/go.d/energid.conf | 17 - config/go.d/sd/hostsocket.yaml | 97 ++ config/go.d/solr.conf | 13 - config/go.d/springboot2.conf | 13 - examples/simple/main.go | 14 +- modules/activemq/activemq.go | 294 +--- modules/activemq/activemq_test.go | 31 +- modules/activemq/apiclient.go | 7 +- modules/activemq/collect.go | 185 +++ modules/activemq/config_schema.json | 350 ++++- modules/activemq/init.go | 32 + modules/apache/apache.go | 40 +- modules/apache/apache_test.go | 22 +- modules/apache/config_schema.json | 283 +++- modules/apache/init.go | 4 +- modules/bind/bind.go | 317 +--- modules/bind/bind_test.go | 42 +- modules/bind/collect.go | 200 +++ modules/bind/config_schema.json | 253 +++- modules/bind/init.go | 37 + modules/cassandra/cassandra.go | 39 +- modules/cassandra/cassandra_test.go | 12 +- modules/cassandra/config_schema.json | 283 +++- modules/chrony/chrony.go | 28 +- modules/chrony/chrony_test.go | 20 +- modules/chrony/client.go | 2 +- modules/chrony/config_schema.json | 51 +- modules/chrony/init.go | 2 +- modules/cockroachdb/cockroachdb.go | 106 +- modules/cockroachdb/cockroachdb_test.go | 22 +- modules/cockroachdb/config_schema.json | 284 +++- modules/cockroachdb/init.go | 25 + modules/consul/config_schema.json | 291 +++- modules/consul/consul.go | 43 +- modules/consul/consul_test.go | 24 +- modules/coredns/config_schema.json | 297 +++- modules/coredns/coredns.go | 114 +- modules/coredns/coredns_test.go | 42 +- modules/coredns/init.go | 40 + modules/couchbase/config_schema.json | 283 +++- modules/couchbase/couchbase.go | 67 +- modules/couchbase/couchbase_test.go | 16 +- modules/couchbase/init.go | 4 +- modules/couchdb/collect.go | 6 +- modules/couchdb/config_schema.json | 299 +++- modules/couchdb/couchdb.go | 72 +- modules/couchdb/couchdb_test.go | 16 +- modules/dnsdist/config_schema.json | 283 +++- modules/dnsdist/dnsdist.go 
| 57 +- modules/dnsdist/dnsdist_test.go | 14 +- modules/dnsdist/init.go | 6 +- modules/dnsmasq/config_schema.json | 66 +- modules/dnsmasq/dnsmasq.go | 34 +- modules/dnsmasq/dnsmasq_test.go | 14 +- modules/dnsmasq/init.go | 8 +- modules/dnsmasq_dhcp/config_schema.json | 50 +- modules/dnsmasq_dhcp/dhcp.go | 42 +- modules/dnsmasq_dhcp/dhcp_test.go | 22 +- modules/dnsquery/collect.go | 2 +- modules/dnsquery/config_schema.json | 139 +- modules/dnsquery/dnsquery.go | 36 +- modules/dnsquery/dnsquery_test.go | 26 +- modules/docker/collect.go | 8 +- modules/docker/config_schema.json | 60 +- modules/docker/docker.go | 32 +- modules/docker/docker_test.go | 18 +- modules/docker_engine/config_schema.json | 283 +++- modules/docker_engine/docker_engine.go | 93 +- modules/docker_engine/docker_engine_test.go | 24 +- modules/docker_engine/init.go | 25 + modules/dockerhub/config_schema.json | 295 +++- modules/dockerhub/dockerhub.go | 86 +- modules/dockerhub/dockerhub_test.go | 36 +- modules/dockerhub/init.go | 26 + modules/elasticsearch/config_schema.json | 326 +++- modules/elasticsearch/elasticsearch.go | 44 +- modules/elasticsearch/elasticsearch_test.go | 16 +- modules/energid/README.md | 1 - modules/energid/charts.go | 97 -- modules/energid/collect.go | 161 -- modules/energid/config_schema.json | 59 - modules/energid/energid.go | 104 -- modules/energid/energid_test.go | 285 ---- modules/energid/init.go | 31 - .../integrations/energi_core_wallet.md | 224 --- modules/energid/jsonrpc.go | 48 - modules/energid/metadata.yaml | 225 --- modules/energid/metrics.go | 49 - .../testdata/v2.4.1/getblockchaininfo.json | 66 - .../testdata/v2.4.1/getmemoryinfo.json | 14 - .../testdata/v2.4.1/getmempoolinfo.json | 11 - .../testdata/v2.4.1/getnetworkinfo.json | 41 - .../testdata/v2.4.1/gettxoutsetinfo.json | 13 - modules/envoy/config_schema.json | 283 +++- modules/envoy/envoy.go | 34 +- modules/envoy/envoy_test.go | 16 +- modules/example/config_schema.json | 121 +- modules/example/example.go | 45 +- modules/example/example_test.go | 14 +- modules/filecheck/collect_dirs.go | 4 +- modules/filecheck/collect_files.go | 4 +- modules/filecheck/config_schema.json | 140 +- modules/filecheck/filecheck.go | 39 +- modules/filecheck/filecheck_test.go | 10 +- modules/filecheck/init.go | 4 +- modules/filecheck/metadata.yaml | 2 +- modules/fluentd/collect.go | 66 + modules/fluentd/config_schema.json | 284 +++- modules/fluentd/fluentd.go | 154 +- modules/fluentd/fluentd_test.go | 33 +- modules/fluentd/init.go | 34 + modules/freeradius/config_schema.json | 61 +- modules/freeradius/freeradius.go | 78 +- modules/freeradius/freeradius_test.go | 12 +- modules/freeradius/init.go | 20 + modules/geth/config_schema.json | 283 +++- modules/geth/geth.go | 87 +- modules/geth/init.go | 24 + modules/haproxy/config_schema.json | 283 +++- modules/haproxy/haproxy.go | 45 +- modules/haproxy/haproxy_test.go | 16 +- modules/hdfs/collect.go | 85 +- modules/hdfs/config_schema.json | 283 +++- modules/hdfs/hdfs.go | 86 +- modules/hdfs/hdfs_test.go | 40 +- modules/hdfs/init.go | 25 + modules/hdfs/raw_data.go | 51 + modules/httpcheck/config_schema.json | 137 +- modules/httpcheck/httpcheck.go | 33 +- modules/httpcheck/httpcheck_test.go | 20 +- modules/init.go | 151 +- modules/isc_dhcpd/config_schema.json | 69 +- modules/isc_dhcpd/init.go | 6 +- modules/isc_dhcpd/isc_dhcpd.go | 48 +- modules/isc_dhcpd/isc_dhcpd_test.go | 14 +- modules/k8s_kubelet/config_schema.json | 284 +++- modules/k8s_kubelet/init.go | 35 + modules/k8s_kubelet/kubelet.go | 103 +- 
modules/k8s_kubelet/kubelet_test.go | 26 +- modules/k8s_kubeproxy/config_schema.json | 283 +++- modules/k8s_kubeproxy/init.go | 26 + modules/k8s_kubeproxy/kubeproxy.go | 79 +- modules/k8s_kubeproxy/kubeproxy_test.go | 40 +- modules/k8s_state/config_schema.json | 26 +- modules/k8s_state/kube_state.go | 42 +- modules/k8s_state/kube_state_test.go | 14 +- modules/lighttpd/config_schema.json | 283 +++- modules/lighttpd/init.go | 29 + modules/lighttpd/lighttpd.go | 76 +- modules/lighttpd/lighttpd_test.go | 37 +- modules/logind/config_schema.json | 37 +- modules/logind/logind.go | 25 +- modules/logind/logind_test.go | 18 +- modules/logstash/config_schema.json | 283 +++- modules/logstash/logstash.go | 40 +- modules/logstash/logstash_test.go | 16 +- modules/mongodb/config_schema.json | 45 +- modules/mongodb/mongodb.go | 25 +- modules/mongodb/mongodb_test.go | 12 +- modules/mysql/collect.go | 4 +- modules/mysql/config_schema.json | 57 +- modules/mysql/mysql.go | 32 +- modules/mysql/mysql_test.go | 12 +- modules/nginx/config_schema.json | 283 +++- modules/nginx/nginx.go | 79 +- modules/nginx/nginx_test.go | 41 +- modules/nginxplus/config_schema.json | 283 +++- modules/nginxplus/nginxplus.go | 33 +- modules/nginxplus/nginxplus_test.go | 16 +- modules/nginxvts/config_schema.json | 283 +++- modules/nginxvts/init.go | 6 +- modules/nginxvts/nginxvts.go | 31 +- modules/nginxvts/nginxvts_test.go | 16 +- modules/ntpd/client.go | 4 +- modules/ntpd/config_schema.json | 51 +- modules/ntpd/ntpd.go | 30 +- modules/ntpd/ntpd_test.go | 18 +- modules/nvidia_smi/config_schema.json | 49 +- modules/nvidia_smi/exec.go | 2 +- modules/nvidia_smi/nvidia_smi.go | 25 +- modules/nvidia_smi/nvidia_smi_test.go | 8 +- modules/nvme/config_schema.json | 43 +- modules/nvme/init.go | 8 +- modules/nvme/nvme.go | 27 +- modules/nvme/nvme_test.go | 8 +- modules/openvpn/config_schema.json | 99 +- modules/openvpn/init.go | 30 + modules/openvpn/openvpn.go | 103 +- modules/openvpn/openvpn_test.go | 31 +- modules/openvpn_status_log/config_schema.json | 63 +- modules/openvpn_status_log/init.go | 4 +- modules/openvpn_status_log/openvpn.go | 38 +- modules/openvpn_status_log/openvpn_test.go | 14 +- modules/pgbouncer/collect.go | 4 +- modules/pgbouncer/config_schema.json | 45 +- modules/pgbouncer/pgbouncer.go | 25 +- modules/pgbouncer/pgbouncer_test.go | 12 +- modules/phpdaemon/config_schema.json | 238 ++- modules/phpdaemon/init.go | 27 + modules/phpdaemon/phpdaemon.go | 85 +- modules/phpdaemon/phpdaemon_test.go | 35 +- modules/phpfpm/config_schema.json | 161 +- modules/phpfpm/init.go | 24 +- modules/phpfpm/phpfpm.go | 55 +- modules/phpfpm/phpfpm_test.go | 49 +- modules/pihole/config_schema.json | 290 +++- modules/pihole/pihole.go | 34 +- modules/pihole/pihole_test.go | 14 +- modules/pika/config_schema.json | 69 +- modules/pika/init.go | 6 +- modules/pika/pika.go | 29 +- modules/pika/pika_test.go | 18 +- modules/ping/config_schema.json | 91 +- modules/ping/init.go | 2 +- modules/ping/ping.go | 41 +- modules/ping/ping_test.go | 12 +- modules/portcheck/collect.go | 2 +- modules/portcheck/config_schema.json | 71 +- modules/portcheck/init.go | 18 + modules/portcheck/portcheck.go | 31 +- modules/portcheck/portcheck_test.go | 16 +- modules/postgres/collect.go | 4 +- modules/postgres/config_schema.json | 85 +- modules/postgres/do_query.go | 6 +- modules/postgres/postgres.go | 27 +- modules/postgres/postgres_test.go | 12 +- modules/powerdns/authoritativens.go | 35 +- modules/powerdns/authoritativens_test.go | 14 +- modules/powerdns/config_schema.json | 
283 +++- modules/powerdns/init.go | 6 +- modules/powerdns_recursor/config_schema.json | 283 +++- modules/powerdns_recursor/init.go | 6 +- modules/powerdns_recursor/recursor.go | 35 +- modules/powerdns_recursor/recursor_test.go | 14 +- modules/prometheus/config_schema.json | 207 +-- modules/prometheus/prometheus.go | 55 +- modules/prometheus/prometheus_test.go | 14 +- modules/proxysql/collect.go | 4 +- modules/proxysql/config_schema.json | 51 +- modules/proxysql/proxysql.go | 46 +- modules/proxysql/proxysql_test.go | 12 +- modules/pulsar/cache.go | 19 + modules/pulsar/config_schema.json | 288 +++- modules/pulsar/init.go | 34 + modules/pulsar/pulsar.go | 133 +- modules/pulsar/pulsar_test.go | 22 +- modules/rabbitmq/config_schema.json | 290 +++- modules/rabbitmq/rabbitmq.go | 35 +- modules/rabbitmq/rabbitmq_test.go | 14 +- modules/redis/config_schema.json | 89 +- modules/redis/init.go | 6 +- modules/redis/redis.go | 29 +- modules/redis/redis_test.go | 18 +- modules/scaleio/collect_sdc.go | 2 +- modules/scaleio/collect_storage_pool.go | 2 +- modules/scaleio/collect_system.go | 2 +- modules/scaleio/config_schema.json | 283 +++- modules/scaleio/scaleio.go | 62 +- modules/scaleio/scaleio_test.go | 26 +- modules/snmp/config_schema.json | 337 ++--- modules/snmp/init.go | 6 +- modules/snmp/snmp.go | 29 +- modules/snmp/snmp_test.go | 20 +- modules/solr/README.md | 1 - modules/solr/charts.go | 141 -- modules/solr/config_schema.json | 59 - modules/solr/integrations/solr.md | 223 --- modules/solr/metadata.yaml | 268 ---- modules/solr/parser.go | 151 -- modules/solr/solr.go | 212 --- modules/solr/solr_test.go | 274 ---- modules/solr/testdata/core-metrics-v6.txt | 794 ---------- modules/solr/testdata/core-metrics-v7.txt | 732 --------- modules/springboot2/README.md | 1 - modules/springboot2/charts.go | 77 - modules/springboot2/config_schema.json | 76 - .../java_spring-boot_2_applications.md | 233 --- modules/springboot2/metadata.yaml | 239 --- modules/springboot2/springboot2.go | 190 --- modules/springboot2/springboot2_test.go | 103 -- modules/springboot2/tests/testdata.txt | 194 --- modules/springboot2/tests/testdata2.txt | 193 --- modules/squidlog/collect.go | 2 +- modules/squidlog/config_schema.json | 183 +-- modules/squidlog/squidlog.go | 70 +- modules/squidlog/squidlog_test.go | 18 +- modules/supervisord/config_schema.json | 41 +- modules/supervisord/init.go | 4 +- modules/supervisord/supervisord.go | 27 +- modules/supervisord/supervisord_test.go | 18 +- modules/systemdunits/collect.go | 4 +- modules/systemdunits/config_schema.json | 51 +- modules/systemdunits/systemdunits.go | 34 +- modules/systemdunits/systemdunits_test.go | 18 +- modules/tengine/config_schema.json | 283 +++- modules/tengine/tengine.go | 73 +- modules/tengine/tengine_test.go | 33 +- modules/traefik/config_schema.json | 283 +++- modules/traefik/traefik.go | 33 +- modules/traefik/traefik_test.go | 18 +- modules/unbound/config_schema.json | 89 +- modules/unbound/init.go | 6 +- modules/unbound/unbound.go | 99 +- modules/unbound/unbound_test.go | 36 +- modules/upsd/client.go | 6 +- modules/upsd/config_schema.json | 57 +- modules/upsd/upsd.go | 32 +- modules/upsd/upsd_test.go | 16 +- modules/vcsa/config_schema.json | 283 +++- modules/vcsa/vcsa.go | 39 +- modules/vcsa/vcsa_test.go | 28 +- modules/vernemq/config_schema.json | 283 +++- modules/vernemq/init.go | 26 + modules/vernemq/vernemq.go | 77 +- modules/vernemq/vernemq_test.go | 26 +- modules/vsphere/config_schema.json | 145 +- modules/vsphere/discover.go | 2 +- modules/vsphere/init.go 
| 2 +- modules/vsphere/vsphere.go | 48 +- modules/vsphere/vsphere_test.go | 33 +- modules/weblog/config_schema.json | 345 ++--- modules/weblog/weblog.go | 64 +- modules/weblog/weblog_test.go | 50 +- modules/whoisquery/config_schema.json | 57 +- modules/whoisquery/provider.go | 2 +- modules/whoisquery/whoisquery.go | 27 +- modules/whoisquery/whoisquery_test.go | 14 +- modules/windows/config_schema.json | 283 +++- modules/windows/init.go | 11 +- modules/windows/windows.go | 49 +- modules/windows/windows_test.go | 12 +- modules/wireguard/config_schema.json | 27 +- modules/wireguard/wireguard.go | 26 +- modules/wireguard/wireguard_test.go | 10 +- modules/x509check/config_schema.json | 103 +- modules/x509check/provider.go | 4 +- modules/x509check/x509check.go | 27 +- modules/x509check/x509check_test.go | 12 +- modules/zookeeper/collect.go | 4 + modules/zookeeper/config_schema.json | 71 +- modules/zookeeper/fetcher.go | 3 + modules/zookeeper/init.go | 41 + modules/zookeeper/zookeeper.go | 94 +- modules/zookeeper/zookeeper_test.go | 22 +- pkg/logs/csv.go | 10 +- pkg/logs/json.go | 2 +- pkg/logs/ltsv.go | 6 +- pkg/logs/parser.go | 10 +- pkg/logs/regexp.go | 2 +- pkg/matcher/glob.go | 3 +- pkg/tlscfg/config.go | 8 +- pkg/web/client.go | 14 +- pkg/web/client_test.go | 2 +- pkg/web/duration.go | 49 +- pkg/web/duration_test.go | 106 +- pkg/web/request.go | 16 +- pkg/web/web.go | 4 +- 399 files changed, 18533 insertions(+), 14219 deletions(-) delete mode 100644 agent/confgroup/cache.go delete mode 100644 agent/confgroup/cache_test.go create mode 100644 agent/confgroup/config.go delete mode 100644 agent/discovery/dyncfg/config.go delete mode 100644 agent/discovery/dyncfg/dyncfg.go delete mode 100644 agent/discovery/dyncfg/dyncfg_test.go delete mode 100644 agent/discovery/dyncfg/ext.go delete mode 100644 agent/discovery/sd/pipeline/qq.yaml create mode 100644 agent/jobmgr/dyncfg.go delete mode 100644 agent/jobmgr/run.go create mode 100644 agent/jobmgr/sim_test.go delete mode 100644 config/go.d/energid.conf create mode 100644 config/go.d/sd/hostsocket.yaml delete mode 100644 config/go.d/solr.conf delete mode 100644 config/go.d/springboot2.conf create mode 100644 modules/activemq/collect.go create mode 100644 modules/activemq/init.go create mode 100644 modules/bind/collect.go create mode 100644 modules/bind/init.go create mode 100644 modules/cockroachdb/init.go create mode 100644 modules/coredns/init.go create mode 100644 modules/docker_engine/init.go create mode 100644 modules/dockerhub/init.go delete mode 120000 modules/energid/README.md delete mode 100644 modules/energid/charts.go delete mode 100644 modules/energid/collect.go delete mode 100644 modules/energid/config_schema.json delete mode 100644 modules/energid/energid.go delete mode 100644 modules/energid/energid_test.go delete mode 100644 modules/energid/init.go delete mode 100644 modules/energid/integrations/energi_core_wallet.md delete mode 100644 modules/energid/jsonrpc.go delete mode 100644 modules/energid/metadata.yaml delete mode 100644 modules/energid/metrics.go delete mode 100644 modules/energid/testdata/v2.4.1/getblockchaininfo.json delete mode 100644 modules/energid/testdata/v2.4.1/getmemoryinfo.json delete mode 100644 modules/energid/testdata/v2.4.1/getmempoolinfo.json delete mode 100644 modules/energid/testdata/v2.4.1/getnetworkinfo.json delete mode 100644 modules/energid/testdata/v2.4.1/gettxoutsetinfo.json create mode 100644 modules/fluentd/collect.go create mode 100644 modules/fluentd/init.go create mode 100644 
modules/freeradius/init.go create mode 100644 modules/geth/init.go create mode 100644 modules/hdfs/init.go create mode 100644 modules/hdfs/raw_data.go create mode 100644 modules/k8s_kubelet/init.go create mode 100644 modules/k8s_kubeproxy/init.go create mode 100644 modules/lighttpd/init.go create mode 100644 modules/openvpn/init.go create mode 100644 modules/phpdaemon/init.go create mode 100644 modules/pulsar/cache.go create mode 100644 modules/pulsar/init.go delete mode 120000 modules/solr/README.md delete mode 100644 modules/solr/charts.go delete mode 100644 modules/solr/config_schema.json delete mode 100644 modules/solr/integrations/solr.md delete mode 100644 modules/solr/metadata.yaml delete mode 100644 modules/solr/parser.go delete mode 100644 modules/solr/solr.go delete mode 100644 modules/solr/solr_test.go delete mode 100644 modules/solr/testdata/core-metrics-v6.txt delete mode 100644 modules/solr/testdata/core-metrics-v7.txt delete mode 120000 modules/springboot2/README.md delete mode 100644 modules/springboot2/charts.go delete mode 100644 modules/springboot2/config_schema.json delete mode 100644 modules/springboot2/integrations/java_spring-boot_2_applications.md delete mode 100644 modules/springboot2/metadata.yaml delete mode 100644 modules/springboot2/springboot2.go delete mode 100644 modules/springboot2/springboot2_test.go delete mode 100644 modules/springboot2/tests/testdata.txt delete mode 100644 modules/springboot2/tests/testdata2.txt create mode 100644 modules/vernemq/init.go create mode 100644 modules/zookeeper/init.go diff --git a/README.md b/README.md index 7f34241b1..93ca759e8 100644 --- a/README.md +++ b/README.md @@ -68,7 +68,6 @@ see the appropriate collector readme. | [docker_engine](https://github.com/netdata/go.d.plugin/tree/master/modules/docker_engine) | Docker Engine | | [dockerhub](https://github.com/netdata/go.d.plugin/tree/master/modules/dockerhub) | Docker Hub | | [elasticsearch](https://github.com/netdata/go.d.plugin/tree/master/modules/elasticsearch) | Elasticsearch/OpenSearch | -| [energid](https://github.com/netdata/go.d.plugin/tree/master/modules/energid) | Energi Core | | [envoy](https://github.com/netdata/go.d.plugin/tree/master/modules/envoy) | Envoy | | [example](https://github.com/netdata/go.d.plugin/tree/master/modules/example) | - | | [filecheck](https://github.com/netdata/go.d.plugin/tree/master/modules/filecheck) | Files and Directories | @@ -110,9 +109,7 @@ see the appropriate collector readme. 
| [redis](https://github.com/netdata/go.d.plugin/tree/master/modules/redis) | Redis | | [scaleio](https://github.com/netdata/go.d.plugin/tree/master/modules/scaleio) | Dell EMC ScaleIO | | [SNMP](https://github.com/netdata/go.d.plugin/blob/master/modules/snmp) | SNMP | -| [solr](https://github.com/netdata/go.d.plugin/tree/master/modules/solr) | Solr | | [squidlog](https://github.com/netdata/go.d.plugin/tree/master/modules/squidlog) | Squid | -| [springboot2](https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2) | Spring Boot2 | | [supervisord](https://github.com/netdata/go.d.plugin/tree/master/modules/supervisord) | Supervisor | | [systemdunits](https://github.com/netdata/go.d.plugin/tree/master/modules/systemdunits) | Systemd unit state | | [tengine](https://github.com/netdata/go.d.plugin/tree/master/modules/tengine) | Tengine | diff --git a/agent/agent.go b/agent/agent.go index 9d6a85f91..43b4d8879 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -96,11 +96,9 @@ func serve(a *Agent) { var wg sync.WaitGroup var exit bool - var reload bool for { ctx, cancel := context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, "reload", reload) wg.Add(1) go func() { defer wg.Done(); a.run(ctx) }() @@ -136,7 +134,6 @@ func serve(a *Agent) { os.Exit(0) } - reload = true time.Sleep(time.Second) } } @@ -169,7 +166,7 @@ func (a *Agent) run(ctx context.Context) { discCfg := a.buildDiscoveryConf(enabledModules) - discoveryManager, err := discovery.NewManager(discCfg) + discMgr, err := discovery.NewManager(discCfg) if err != nil { a.Error(err) if isTerminal { @@ -178,46 +175,32 @@ func (a *Agent) run(ctx context.Context) { return } - functionsManager := functions.NewManager() - - jobsManager := jobmgr.NewManager() - jobsManager.PluginName = a.Name - jobsManager.Out = a.Out - jobsManager.Modules = enabledModules - - // TODO: API will be changed in https://github.com/netdata/netdata/pull/16702 - //if logger.Level.Enabled(slog.LevelDebug) { - // dyncfgDiscovery, _ := dyncfg.NewDiscovery(dyncfg.Config{ - // Plugin: a.Name, - // API: netdataapi.New(a.Out), - // Modules: enabledModules, - // ModuleConfigDefaults: discCfg.Registry, - // Functions: functionsManager, - // }) - // - // discoveryManager.Add(dyncfgDiscovery) - // - // jobsManager.Dyncfg = dyncfgDiscovery - //} + fnMgr := functions.NewManager() + + jobMgr := jobmgr.New() + jobMgr.PluginName = a.Name + jobMgr.Out = a.Out + jobMgr.Modules = enabledModules + jobMgr.FnReg = fnMgr if reg := a.setupVnodeRegistry(); reg == nil || reg.Len() == 0 { vnodes.Disabled = true } else { - jobsManager.Vnodes = reg + jobMgr.Vnodes = reg } if a.LockDir != "" { - jobsManager.FileLock = filelock.New(a.LockDir) + jobMgr.FileLock = filelock.New(a.LockDir) } - var statusSaveManager *filestatus.Manager + var fsMgr *filestatus.Manager if !isTerminal && a.StateFile != "" { - statusSaveManager = filestatus.NewManager(a.StateFile) - jobsManager.StatusSaver = statusSaveManager + fsMgr = filestatus.NewManager(a.StateFile) + jobMgr.FileStatus = fsMgr if store, err := filestatus.LoadStore(a.StateFile); err != nil { a.Warningf("couldn't load state file: %v", err) } else { - jobsManager.StatusStore = store + jobMgr.FileStatusStore = store } } @@ -225,17 +208,17 @@ func (a *Agent) run(ctx context.Context) { var wg sync.WaitGroup wg.Add(1) - go func() { defer wg.Done(); functionsManager.Run(ctx) }() + go func() { defer wg.Done(); fnMgr.Run(ctx) }() wg.Add(1) - go func() { defer wg.Done(); jobsManager.Run(ctx, in) }() + go func() { defer wg.Done(); 
jobMgr.Run(ctx, in) }() wg.Add(1) - go func() { defer wg.Done(); discoveryManager.Run(ctx, in) }() + go func() { defer wg.Done(); discMgr.Run(ctx, in) }() - if statusSaveManager != nil { + if fsMgr != nil { wg.Add(1) - go func() { defer wg.Done(); statusSaveManager.Run(ctx) }() + go func() { defer wg.Done(); fsMgr.Run(ctx) }() } wg.Wait() diff --git a/agent/agent_test.go b/agent/agent_test.go index 2a15a6b73..2abbdb31a 100644 --- a/agent/agent_test.go +++ b/agent/agent_test.go @@ -74,17 +74,17 @@ func prepareRegistry(mux *sync.Mutex, stats map[string]int, names ...string) mod func prepareMockModule(name string, mux *sync.Mutex, stats map[string]int) module.Module { return &module.MockModule{ - InitFunc: func() bool { + InitFunc: func() error { mux.Lock() defer mux.Unlock() stats[name+"_init"]++ - return true + return nil }, - CheckFunc: func() bool { + CheckFunc: func() error { mux.Lock() defer mux.Unlock() stats[name+"_check"]++ - return true + return nil }, ChartsFunc: func() *module.Charts { mux.Lock() diff --git a/agent/confgroup/cache.go b/agent/confgroup/cache.go deleted file mode 100644 index 40c8071d5..000000000 --- a/agent/confgroup/cache.go +++ /dev/null @@ -1,93 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package confgroup - -func NewCache() *Cache { - return &Cache{ - hashes: make(map[uint64]uint), - sources: make(map[string]map[uint64]Config), - } -} - -type Cache struct { - hashes map[uint64]uint // map[cfgHash]cfgCount - sources map[string]map[uint64]Config // map[cfgSource]map[cfgHash]cfg -} - -func (c *Cache) Add(group *Group) (added, removed []Config) { - if group == nil { - return nil, nil - } - - if len(group.Configs) == 0 { - return c.addEmpty(group) - } - - return c.addNotEmpty(group) -} - -func (c *Cache) addEmpty(group *Group) (added, removed []Config) { - set, ok := c.sources[group.Source] - if !ok { - return nil, nil - } - - for hash, cfg := range set { - c.hashes[hash]-- - if c.hashes[hash] == 0 { - removed = append(removed, cfg) - } - delete(set, hash) - } - - delete(c.sources, group.Source) - - return nil, removed -} - -func (c *Cache) addNotEmpty(group *Group) (added, removed []Config) { - set, ok := c.sources[group.Source] - if !ok { - set = make(map[uint64]Config) - c.sources[group.Source] = set - } - - seen := make(map[uint64]struct{}) - - for _, cfg := range group.Configs { - hash := cfg.Hash() - seen[hash] = struct{}{} - - if _, ok := set[hash]; ok { - continue - } - - set[hash] = cfg - if c.hashes[hash] == 0 { - added = append(added, cfg) - } - c.hashes[hash]++ - } - - if !ok { - return added, nil - } - - for hash, cfg := range set { - if _, ok := seen[hash]; ok { - continue - } - - delete(set, hash) - c.hashes[hash]-- - if c.hashes[hash] == 0 { - removed = append(removed, cfg) - } - } - - if ok && len(set) == 0 { - delete(c.sources, group.Source) - } - - return added, removed -} diff --git a/agent/confgroup/cache_test.go b/agent/confgroup/cache_test.go deleted file mode 100644 index a2bbd4919..000000000 --- a/agent/confgroup/cache_test.go +++ /dev/null @@ -1,134 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package confgroup - -import ( - "sort" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestConfigCache_Add(t *testing.T) { - tests := map[string]struct { - prepareGroups []Group - groups []Group - expectedAdd []Config - expectedRemove []Config - }{ - "new group, new configs": { - groups: []Group{ - prepareGroup("source", prepareCfg("name", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name", 
"module"), - }, - }, - "several equal updates for the same group": { - groups: []Group{ - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - prepareGroup("source", prepareCfg("name", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name", "module"), - }, - }, - "empty group update for cached group": { - prepareGroups: []Group{ - prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source"), - }, - expectedRemove: []Config{ - prepareCfg("name1", "module"), - prepareCfg("name2", "module"), - }, - }, - "changed group update for cached group": { - prepareGroups: []Group{ - prepareGroup("source", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source", prepareCfg("name2", "module")), - }, - expectedRemove: []Config{ - prepareCfg("name1", "module"), - }, - }, - "empty group update for uncached group": { - groups: []Group{ - prepareGroup("source"), - prepareGroup("source"), - }, - }, - "several updates with different source but same context": { - groups: []Group{ - prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - expectedAdd: []Config{ - prepareCfg("name1", "module"), - prepareCfg("name2", "module"), - }, - }, - "have equal configs from 2 sources, get empty group for the 1st source": { - prepareGroups: []Group{ - prepareGroup("source1", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - prepareGroup("source2", prepareCfg("name1", "module"), prepareCfg("name2", "module")), - }, - groups: []Group{ - prepareGroup("source2"), - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - cache := NewCache() - - for _, group := range test.prepareGroups { - cache.Add(&group) - } - - var added, removed []Config - for _, group := range test.groups { - a, r := cache.Add(&group) - added = append(added, a...) - removed = append(removed, r...) 
- } - - sortConfigs(added) - sortConfigs(removed) - sortConfigs(test.expectedAdd) - sortConfigs(test.expectedRemove) - - assert.Equalf(t, test.expectedAdd, added, "added configs") - assert.Equalf(t, test.expectedRemove, removed, "removed configs") - }) - } -} - -func prepareGroup(source string, cfgs ...Config) Group { - return Group{ - Configs: cfgs, - Source: source, - } -} - -func prepareCfg(name, module string) Config { - return Config{ - "name": name, - "module": module, - } -} - -func sortConfigs(cfgs []Config) { - if len(cfgs) == 0 { - return - } - sort.Slice(cfgs, func(i, j int) bool { return cfgs[i].FullName() < cfgs[j].FullName() }) -} diff --git a/agent/confgroup/config.go b/agent/confgroup/config.go new file mode 100644 index 000000000..92d52b9d4 --- /dev/null +++ b/agent/confgroup/config.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package confgroup + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/netdata/go.d.plugin/agent/hostinfo" + "github.com/netdata/go.d.plugin/agent/module" + + "github.com/ilyam8/hashstructure" + "gopkg.in/yaml.v2" +) + +const ( + keyName = "name" + keyModule = "module" + keyUpdateEvery = "update_every" + keyDetectRetry = "autodetection_retry" + keyPriority = "priority" + keyLabels = "labels" + keyVnode = "vnode" + + ikeySource = "__source__" + ikeySourceType = "__source_type__" + ikeyProvider = "__provider__" +) + +type Config map[string]any + +func (c Config) HashIncludeMap(_ string, k, _ any) (bool, error) { + s := k.(string) + return !(strings.HasPrefix(s, "__") || strings.HasSuffix(s, "__")), nil +} + +func (c Config) Set(key string, value any) Config { c[key] = value; return c } +func (c Config) Get(key string) any { return c[key] } + +func (c Config) Name() string { v, _ := c.Get(keyName).(string); return v } +func (c Config) Module() string { v, _ := c.Get(keyModule).(string); return v } +func (c Config) FullName() string { return fullName(c.Name(), c.Module()) } +func (c Config) UpdateEvery() int { v, _ := c.Get(keyUpdateEvery).(int); return v } +func (c Config) AutoDetectionRetry() int { v, _ := c.Get(keyDetectRetry).(int); return v } +func (c Config) Priority() int { v, _ := c.Get(keyPriority).(int); return v } +func (c Config) Labels() map[any]any { v, _ := c.Get(keyLabels).(map[any]any); return v } +func (c Config) Hash() uint64 { return calcHash(c) } +func (c Config) Vnode() string { v, _ := c.Get(keyVnode).(string); return v } + +func (c Config) SetName(v string) Config { return c.Set(keyName, v) } +func (c Config) SetModule(v string) Config { return c.Set(keyModule, v) } + +func (c Config) UID() string { + return fmt.Sprintf("%s_%s_%s_%s_%d", c.SourceType(), c.Provider(), c.Source(), c.FullName(), c.Hash()) +} + +func (c Config) Source() string { v, _ := c.Get(ikeySource).(string); return v } +func (c Config) SourceType() string { v, _ := c.Get(ikeySourceType).(string); return v } +func (c Config) Provider() string { v, _ := c.Get(ikeyProvider).(string); return v } +func (c Config) SetSource(v string) Config { return c.Set(ikeySource, v) } +func (c Config) SetSourceType(v string) Config { return c.Set(ikeySourceType, v) } +func (c Config) SetProvider(v string) Config { return c.Set(ikeyProvider, v) } + +func (c Config) SourceTypePriority() int { + switch c.SourceType() { + default: + return 0 + case "stock": + return 2 + case "discovered": + return 4 + case "user": + return 8 + case "dyncfg": + return 16 + } +} + +func (c Config) Clone() (Config, error) { + type plain Config + bytes, err := 
yaml.Marshal((plain)(c)) + if err != nil { + return nil, err + } + var newConfig Config + if err := yaml.Unmarshal(bytes, &newConfig); err != nil { + return nil, err + } + return newConfig, nil +} + +func (c Config) ApplyDefaults(def Default) { + if c.UpdateEvery() <= 0 { + v := firstPositive(def.UpdateEvery, module.UpdateEvery) + c.Set("update_every", v) + } + if c.AutoDetectionRetry() <= 0 { + v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry) + c.Set("autodetection_retry", v) + } + if c.Priority() <= 0 { + v := firstPositive(def.Priority, module.Priority) + c.Set("priority", v) + } + if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 { + c.Set("update_every", def.MinUpdateEvery) + } + if c.Name() == "" { + c.Set("name", c.Module()) + } else { + c.Set("name", cleanName(jobNameResolveHostname(c.Name()))) + } + + if v, ok := c.Get("url").(string); ok { + c.Set("url", urlResolveHostname(v)) + } +} + +var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`) + +func cleanName(name string) string { + return reInvalidCharacters.ReplaceAllString(name, "_") +} + +func fullName(name, module string) string { + if name == module { + return name + } + return module + "_" + name +} + +func calcHash(obj any) uint64 { + hash, _ := hashstructure.Hash(obj, nil) + return hash +} + +func firstPositive(value int, others ...int) int { + if value > 0 || len(others) == 0 { + return value + } + return firstPositive(others[0], others[1:]...) +} + +func urlResolveHostname(rawURL string) string { + if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") { + return rawURL + } + + u, err := url.Parse(rawURL) + if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) { + return rawURL + } + + u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1) + + return u.String() +} + +func jobNameResolveHostname(name string) string { + if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") { + return name + } + + if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") { + return name + } + + return strings.Replace(name, "hostname", hostinfo.Hostname, 1) +} diff --git a/agent/confgroup/group.go b/agent/confgroup/group.go index 649a145d7..286a0f922 100644 --- a/agent/confgroup/group.go +++ b/agent/confgroup/group.go @@ -2,126 +2,8 @@ package confgroup -import ( - "fmt" - "net/url" - "regexp" - "strings" - - "github.com/netdata/go.d.plugin/agent/hostinfo" - "github.com/netdata/go.d.plugin/agent/module" - - "github.com/ilyam8/hashstructure" -) - type Group struct { - Configs []Config - Source string -} - -type Config map[string]interface{} - -func (c Config) HashIncludeMap(_ string, k, _ interface{}) (bool, error) { - s := k.(string) - return !(strings.HasPrefix(s, "__") && strings.HasSuffix(s, "__")), nil -} - -func (c Config) NameWithHash() string { return fmt.Sprintf("%s_%d", c.Name(), c.Hash()) } -func (c Config) Name() string { v, _ := c.get("name").(string); return v } -func (c Config) Module() string { v, _ := c.get("module").(string); return v } -func (c Config) FullName() string { return fullName(c.Name(), c.Module()) } -func (c Config) UpdateEvery() int { v, _ := c.get("update_every").(int); return v } -func (c Config) AutoDetectionRetry() int { v, _ := c.get("autodetection_retry").(int); return v } -func (c Config) Priority() int { v, _ := c.get("priority").(int); return v } -func (c Config) Labels() map[any]any { v, _ := c.get("labels").(map[any]any); return v } -func (c 
Config) Hash() uint64 { return calcHash(c) } -func (c Config) Source() string { v, _ := c.get("__source__").(string); return v } -func (c Config) Provider() string { v, _ := c.get("__provider__").(string); return v } -func (c Config) Vnode() string { v, _ := c.get("vnode").(string); return v } - -func (c Config) SetName(v string) { c.set("name", v) } -func (c Config) SetModule(v string) { c.set("module", v) } -func (c Config) SetSource(v string) { c.set("__source__", v) } -func (c Config) SetProvider(v string) { c.set("__provider__", v) } - -func (c Config) set(key string, value interface{}) { c[key] = value } -func (c Config) get(key string) interface{} { return c[key] } - -func (c Config) Apply(def Default) { - if c.UpdateEvery() <= 0 { - v := firstPositive(def.UpdateEvery, module.UpdateEvery) - c.set("update_every", v) - } - if c.AutoDetectionRetry() <= 0 { - v := firstPositive(def.AutoDetectionRetry, module.AutoDetectionRetry) - c.set("autodetection_retry", v) - } - if c.Priority() <= 0 { - v := firstPositive(def.Priority, module.Priority) - c.set("priority", v) - } - if c.UpdateEvery() < def.MinUpdateEvery && def.MinUpdateEvery > 0 { - c.set("update_every", def.MinUpdateEvery) - } - if c.Name() == "" { - c.set("name", c.Module()) - } else { - c.set("name", cleanName(jobNameResolveHostname(c.Name()))) - } - - if v, ok := c.get("url").(string); ok { - c.set("url", urlResolveHostname(v)) - } -} - -func cleanName(name string) string { - return reInvalidCharacters.ReplaceAllString(name, "_") -} - -var reInvalidCharacters = regexp.MustCompile(`\s+|\.+`) - -func fullName(name, module string) string { - if name == module { - return name - } - return module + "_" + name -} - -func calcHash(obj interface{}) uint64 { - hash, _ := hashstructure.Hash(obj, nil) - return hash -} - -func firstPositive(value int, others ...int) int { - if value > 0 || len(others) == 0 { - return value - } - return firstPositive(others[0], others[1:]...) 
-} - -func urlResolveHostname(rawURL string) string { - if hostinfo.Hostname == "" || !strings.Contains(rawURL, "hostname") { - return rawURL - } - - u, err := url.Parse(rawURL) - if err != nil || (u.Hostname() != "hostname" && !strings.Contains(u.Hostname(), "hostname.")) { - return rawURL - } - - u.Host = strings.Replace(u.Host, "hostname", hostinfo.Hostname, 1) - - return u.String() -} - -func jobNameResolveHostname(name string) string { - if hostinfo.Hostname == "" || !strings.Contains(name, "hostname") { - return name - } - - if name != "hostname" && !strings.HasPrefix(name, "hostname.") && !strings.HasPrefix(name, "hostname_") { - return name - } - - return strings.Replace(name, "hostname", hostinfo.Hostname, 1) + Configs []Config + Source string + SourceType string } diff --git a/agent/confgroup/group_test.go b/agent/confgroup/group_test.go index af9a804e8..beac8e61b 100644 --- a/agent/confgroup/group_test.go +++ b/agent/confgroup/group_test.go @@ -316,7 +316,7 @@ func TestConfig_Apply(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { - test.origCfg.Apply(test.def) + test.origCfg.ApplyDefaults(test.def) assert.Equal(t, test.expectedCfg, test.origCfg) }) diff --git a/agent/discovery/dummy/discovery.go b/agent/discovery/dummy/discovery.go index acd0b8f1c..36fb76710 100644 --- a/agent/discovery/dummy/discovery.go +++ b/agent/discovery/dummy/discovery.go @@ -65,15 +65,17 @@ func (d *Discovery) newCfgGroup(name string) *confgroup.Group { return nil } + src := "internal" cfg := confgroup.Config{} cfg.SetModule(name) - cfg.SetSource(name) + cfg.SetSource(src) + cfg.SetSourceType("stock") cfg.SetProvider("dummy") - cfg.Apply(def) + cfg.ApplyDefaults(def) group := &confgroup.Group{ Configs: []confgroup.Config{cfg}, - Source: name, + Source: src, } return group } diff --git a/agent/discovery/dyncfg/config.go b/agent/discovery/dyncfg/config.go deleted file mode 100644 index ebda00f50..000000000 --- a/agent/discovery/dyncfg/config.go +++ /dev/null @@ -1,35 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" -) - -type Config struct { - Plugin string - API NetdataDyncfgAPI - Functions FunctionRegistry - Modules module.Registry - ModuleConfigDefaults confgroup.Registry -} - -type NetdataDyncfgAPI interface { - DynCfgEnable(string) error - DynCfgReset() error - DyncCfgRegisterModule(string) error - DynCfgRegisterJob(_, _, _ string) error - DynCfgReportJobStatus(_, _, _, _ string) error - FunctionResultSuccess(_, _, _ string) error - FunctionResultReject(_, _, _ string) error -} - -type FunctionRegistry interface { - Register(name string, reg func(functions.Function)) -} - -func validateConfig(cfg Config) error { - return nil -} diff --git a/agent/discovery/dyncfg/dyncfg.go b/agent/discovery/dyncfg/dyncfg.go deleted file mode 100644 index 2f3c34234..000000000 --- a/agent/discovery/dyncfg/dyncfg.go +++ /dev/null @@ -1,256 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "bytes" - "context" - "fmt" - "log/slog" - "strings" - "sync" - - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/logger" - - "gopkg.in/yaml.v2" -) - -const dynCfg = "dyncfg" - -func NewDiscovery(cfg Config) (*Discovery, error) { - if err := 
validateConfig(cfg); err != nil { - return nil, err - } - - mgr := &Discovery{ - Logger: logger.New().With( - slog.String("component", "discovery dyncfg"), - ), - Plugin: cfg.Plugin, - API: cfg.API, - Modules: cfg.Modules, - ModuleConfigDefaults: nil, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - mgr.registerFunctions(cfg.Functions) - - return mgr, nil -} - -type Discovery struct { - *logger.Logger - - Plugin string - API NetdataDyncfgAPI - Modules module.Registry - ModuleConfigDefaults confgroup.Registry - - in chan<- []*confgroup.Group - - mux *sync.Mutex - configs map[string]confgroup.Config -} - -func (d *Discovery) String() string { - return d.Name() -} - -func (d *Discovery) Name() string { - return "dyncfg discovery" -} - -func (d *Discovery) Run(ctx context.Context, in chan<- []*confgroup.Group) { - d.Info("instance is started") - defer func() { d.Info("instance is stopped") }() - - d.in = in - - if reload, ok := ctx.Value("reload").(bool); ok && reload { - _ = d.API.DynCfgReset() - } - - _ = d.API.DynCfgEnable(d.Plugin) - - for k := range d.Modules { - _ = d.API.DyncCfgRegisterModule(k) - } - - <-ctx.Done() -} - -func (d *Discovery) registerFunctions(r FunctionRegistry) { - r.Register("get_plugin_config", d.getPluginConfig) - r.Register("get_plugin_config_schema", d.getModuleConfigSchema) - r.Register("set_plugin_config", d.setPluginConfig) - - r.Register("get_module_config", d.getModuleConfig) - r.Register("get_module_config_schema", d.getModuleConfigSchema) - r.Register("set_module_config", d.setModuleConfig) - - r.Register("get_job_config", d.getJobConfig) - r.Register("get_job_config_schema", d.getJobConfigSchema) - r.Register("set_job_config", d.setJobConfig) - r.Register("delete_job", d.deleteJobName) -} - -func (d *Discovery) getPluginConfig(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) getPluginConfigSchema(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) setPluginConfig(fn functions.Function) { d.notImplemented(fn) } - -func (d *Discovery) getModuleConfig(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) getModuleConfigSchema(fn functions.Function) { d.notImplemented(fn) } -func (d *Discovery) setModuleConfig(fn functions.Function) { d.notImplemented(fn) } - -func (d *Discovery) getJobConfig(fn functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - moduleName, jobName := fn.Args[0], fn.Args[1] - - bs, err := d.getConfigBytes(moduleName + "_" + jobName) - if err != nil { - d.apiReject(fn, err.Error()) - return - } - - d.apiSuccessYAML(fn, string(bs)) -} - -func (d *Discovery) getJobConfigSchema(fn functions.Function) { - if err := d.verifyFn(fn, 1); err != nil { - d.apiReject(fn, err.Error()) - return - } - - name := fn.Args[0] - - v, ok := d.Modules[name] - if !ok { - msg := jsonErrorf("module %s is not registered", name) - d.apiReject(fn, msg) - return - } - - d.apiSuccessJSON(fn, v.JobConfigSchema) -} - -func (d *Discovery) setJobConfig(fn functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - var cfg confgroup.Config - if err := yaml.NewDecoder(bytes.NewBuffer(fn.Payload)).Decode(&cfg); err != nil { - d.apiReject(fn, err.Error()) - return - } - - modName, jobName := fn.Args[0], fn.Args[1] - def, _ := d.ModuleConfigDefaults.Lookup(modName) - src := source(modName, jobName) - - cfg.SetProvider(dynCfg) - cfg.SetSource(src) - cfg.SetModule(modName) - 
cfg.SetName(jobName) - cfg.Apply(def) - - d.in <- []*confgroup.Group{ - { - Configs: []confgroup.Config{cfg}, - Source: src, - }, - } - - d.apiSuccessJSON(fn, "") -} - -func (d *Discovery) deleteJobName(fn functions.Function) { - if err := d.verifyFn(fn, 2); err != nil { - d.apiReject(fn, err.Error()) - return - } - - modName, jobName := fn.Args[0], fn.Args[1] - - cfg, ok := d.getConfig(modName + "_" + jobName) - if !ok { - d.apiReject(fn, jsonErrorf("module '%s' job '%s': not registered", modName, jobName)) - return - } - if cfg.Provider() != dynCfg { - d.apiReject(fn, jsonErrorf("module '%s' job '%s': can't remove non Dyncfg job", modName, jobName)) - return - } - - d.in <- []*confgroup.Group{ - { - Configs: []confgroup.Config{}, - Source: source(modName, jobName), - }, - } - - d.apiSuccessJSON(fn, "") -} - -func (d *Discovery) apiSuccessJSON(fn functions.Function, payload string) { - _ = d.API.FunctionResultSuccess(fn.UID, "application/json", payload) -} - -func (d *Discovery) apiSuccessYAML(fn functions.Function, payload string) { - _ = d.API.FunctionResultSuccess(fn.UID, "application/x-yaml", payload) -} - -func (d *Discovery) apiReject(fn functions.Function, msg string) { - _ = d.API.FunctionResultReject(fn.UID, "application/json", msg) -} - -func (d *Discovery) notImplemented(fn functions.Function) { - d.Infof("not implemented: '%s'", fn.String()) - msg := jsonErrorf("function '%s' is not implemented", fn.Name) - d.apiReject(fn, msg) -} - -func (d *Discovery) verifyFn(fn functions.Function, wantArgs int) error { - if got := len(fn.Args); got != wantArgs { - msg := jsonErrorf("wrong number of arguments: want %d, got %d (args: '%v')", wantArgs, got, fn.Args) - return fmt.Errorf(msg) - } - - if isSetFunction(fn) && len(fn.Payload) == 0 { - msg := jsonErrorf("no payload") - return fmt.Errorf(msg) - } - - return nil -} - -func jsonErrorf(format string, a ...any) string { - msg := fmt.Sprintf(format, a...) 
- msg = strings.ReplaceAll(msg, "\n", " ") - - return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg) -} - -func source(modName, jobName string) string { - return fmt.Sprintf("%s/%s/%s", dynCfg, modName, jobName) -} - -func cfgJobName(cfg confgroup.Config) string { - if strings.HasPrefix(cfg.Source(), "dyncfg") { - return cfg.Name() - } - return cfg.NameWithHash() -} - -func isSetFunction(fn functions.Function) bool { - return strings.HasPrefix(fn.Name, "set_") -} diff --git a/agent/discovery/dyncfg/dyncfg_test.go b/agent/discovery/dyncfg/dyncfg_test.go deleted file mode 100644 index 3eee1cef3..000000000 --- a/agent/discovery/dyncfg/dyncfg_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package dyncfg - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/functions" - "github.com/netdata/go.d.plugin/agent/module" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewDiscovery(t *testing.T) { - -} - -func TestDiscovery_Register(t *testing.T) { - tests := map[string]struct { - regConfigs []confgroup.Config - wantApiStats *mockApi - wantConfigs int - }{ - "register jobs created by Dyncfg and other providers": { - regConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - wantConfigs: 2, - wantApiStats: &mockApi{ - callsDynCfgRegisterJob: 1, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock mockApi - d := &Discovery{ - API: &mock, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - for _, v := range test.regConfigs { - d.Register(v) - } - - assert.Equal(t, test.wantApiStats, &mock) - assert.Equal(t, test.wantConfigs, len(d.configs)) - }) - } -} - -func TestDiscovery_Unregister(t *testing.T) { - tests := map[string]struct { - regConfigs []confgroup.Config - unregConfigs []confgroup.Config - wantApiStats *mockApi - wantConfigs int - }{ - "register/unregister jobs created by Dyncfg and other providers": { - wantConfigs: 0, - wantApiStats: &mockApi{ - callsDynCfgRegisterJob: 1, - }, - regConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - unregConfigs: []confgroup.Config{ - prepareConfig( - "__provider__", dynCfg, - "module", "test", - "name", "first", - ), - prepareConfig( - "__provider__", "test", - "module", "test", - "name", "second", - ), - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock mockApi - d := &Discovery{ - API: &mock, - mux: &sync.Mutex{}, - configs: make(map[string]confgroup.Config), - } - - for _, v := range test.regConfigs { - d.Register(v) - } - for _, v := range test.unregConfigs { - d.Unregister(v) - } - - assert.Equal(t, test.wantApiStats, &mock) - assert.Equal(t, test.wantConfigs, len(d.configs)) - }) - } -} - -func TestDiscovery_UpdateStatus(t *testing.T) { - -} - -func TestDiscovery_Run(t *testing.T) { - tests := map[string]struct { - wantApiStats *mockApi - }{ - "default run": { - wantApiStats: &mockApi{ - callsDynCfgEnable: 1, - callsDyncCfgRegisterModule: 2, - callsRegister: 10, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - var mock 
mockApi - d, err := NewDiscovery(Config{ - Plugin: "test", - API: &mock, - Functions: &mock, - Modules: module.Registry{ - "module1": module.Creator{}, - "module2": module.Creator{}, - }, - ModuleConfigDefaults: nil, - }) - require.Nil(t, err) - - testTime := time.Second * 3 - ctx, cancel := context.WithTimeout(context.Background(), testTime) - defer cancel() - - in := make(chan<- []*confgroup.Group) - done := make(chan struct{}) - - go func() { defer close(done); d.Run(ctx, in) }() - - timeout := testTime + time.Second*2 - tk := time.NewTimer(timeout) - defer tk.Stop() - - select { - case <-done: - assert.Equal(t, test.wantApiStats, &mock) - case <-tk.C: - t.Errorf("timed out after %s", timeout) - } - }) - } -} - -type mockApi struct { - callsDynCfgEnable int - callsDyncCfgRegisterModule int - callsDynCfgRegisterJob int - callsDynCfgReportJobStatus int - callsFunctionResultSuccess int - callsFunctionResultReject int - - callsRegister int -} - -func (m *mockApi) Register(string, func(functions.Function)) { - m.callsRegister++ -} - -func (m *mockApi) DynCfgEnable(string) error { - m.callsDynCfgEnable++ - return nil -} - -func (m *mockApi) DynCfgReset() error { - return nil -} - -func (m *mockApi) DyncCfgRegisterModule(string) error { - m.callsDyncCfgRegisterModule++ - return nil -} - -func (m *mockApi) DynCfgRegisterJob(_, _, _ string) error { - m.callsDynCfgRegisterJob++ - return nil -} - -func (m *mockApi) DynCfgReportJobStatus(_, _, _, _ string) error { - m.callsDynCfgReportJobStatus++ - return nil -} - -func (m *mockApi) FunctionResultSuccess(_, _, _ string) error { - m.callsFunctionResultSuccess++ - return nil -} - -func (m *mockApi) FunctionResultReject(_, _, _ string) error { - m.callsFunctionResultReject++ - return nil -} - -func prepareConfig(values ...string) confgroup.Config { - cfg := confgroup.Config{} - for i := 1; i < len(values); i += 2 { - cfg[values[i-1]] = values[i] - } - return cfg -} diff --git a/agent/discovery/dyncfg/ext.go b/agent/discovery/dyncfg/ext.go deleted file mode 100644 index 910475c3d..000000000 --- a/agent/discovery/dyncfg/ext.go +++ /dev/null @@ -1,79 +0,0 @@ -package dyncfg - -import ( - "errors" - "os" - "strings" - - "github.com/netdata/go.d.plugin/agent/confgroup" - - "gopkg.in/yaml.v2" -) - -func (d *Discovery) Register(cfg confgroup.Config) { - name := cfgJobName(cfg) - if cfg.Provider() != dynCfg { - // jobType handling in ND is not documented - _ = d.API.DynCfgRegisterJob(cfg.Module(), name, "stock") - } - - key := cfg.Module() + "_" + name - d.addConfig(key, cfg) -} - -func (d *Discovery) Unregister(cfg confgroup.Config) { - key := cfg.Module() + "_" + cfgJobName(cfg) - d.removeConfig(key) -} - -func (d *Discovery) UpdateStatus(cfg confgroup.Config, status, payload string) { - _ = d.API.DynCfgReportJobStatus(cfg.Module(), cfgJobName(cfg), status, payload) -} - -func (d *Discovery) addConfig(name string, cfg confgroup.Config) { - d.mux.Lock() - defer d.mux.Unlock() - - d.configs[name] = cfg -} - -func (d *Discovery) removeConfig(key string) { - d.mux.Lock() - defer d.mux.Unlock() - - delete(d.configs, key) -} - -func (d *Discovery) getConfig(key string) (confgroup.Config, bool) { - d.mux.Lock() - defer d.mux.Unlock() - - v, ok := d.configs[key] - return v, ok -} - -func (d *Discovery) getConfigBytes(key string) ([]byte, error) { - d.mux.Lock() - defer d.mux.Unlock() - - cfg, ok := d.configs[key] - if !ok { - return nil, errors.New("config not found") - } - - bs, err := yaml.Marshal(cfg) - if err != nil { - return nil, err - } - - return bs, nil -} 
- -var envNDStockConfigDir = os.Getenv("NETDATA_STOCK_CONFIG_DIR") - -func isStock(cfg confgroup.Config) bool { - if envNDStockConfigDir == "" { - return false - } - return strings.HasPrefix(cfg.Source(), envNDStockConfigDir) -} diff --git a/agent/discovery/file/parse.go b/agent/discovery/file/parse.go index b6ba52372..9fcb45faf 100644 --- a/agent/discovery/file/parse.go +++ b/agent/discovery/file/parse.go @@ -61,11 +61,12 @@ func parseStaticFormat(reg confgroup.Registry, path string, bs []byte) (*confgro for _, cfg := range modCfg.Jobs { cfg.SetModule(name) def := mergeDef(modCfg.Default, modDef) - cfg.Apply(def) + cfg.ApplyDefaults(def) } group := &confgroup.Group{ - Configs: modCfg.Jobs, - Source: path, + Configs: modCfg.Jobs, + Source: path, + SourceType: configSourceType(path), } return group, nil } @@ -79,16 +80,18 @@ func parseSDFormat(reg confgroup.Registry, path string, bs []byte) (*confgroup.G var i int for _, cfg := range cfgs { if def, ok := reg.Lookup(cfg.Module()); ok && cfg.Module() != "" { - cfg.Apply(def) + cfg.ApplyDefaults(def) cfgs[i] = cfg i++ } } group := &confgroup.Group{ - Configs: cfgs[:i], - Source: path, + Configs: cfgs[:i], + Source: path, + SourceType: configSourceType(path), } + return group, nil } diff --git a/agent/discovery/file/read.go b/agent/discovery/file/read.go index 3d27955ad..2c271995c 100644 --- a/agent/discovery/file/read.go +++ b/agent/discovery/file/read.go @@ -6,6 +6,7 @@ import ( "context" "os" "path/filepath" + "strings" "github.com/netdata/go.d.plugin/agent/confgroup" "github.com/netdata/go.d.plugin/logger" @@ -72,7 +73,7 @@ func (r *Reader) groups() (groups []*confgroup.Group) { continue } if group == nil { - group = &confgroup.Group{Source: path} + group = &confgroup.Group{Source: path, SourceType: configSourceType(path)} } groups = append(groups, group) } @@ -81,9 +82,17 @@ func (r *Reader) groups() (groups []*confgroup.Group) { for _, group := range groups { for _, cfg := range group.Configs { cfg.SetSource(group.Source) + cfg.SetSourceType(group.SourceType) cfg.SetProvider(r.Name()) } } return groups } + +func configSourceType(path string) string { + if strings.Contains(path, "/etc/netdata") { + return "user" + } + return "stock" +} diff --git a/agent/discovery/file/watch.go b/agent/discovery/file/watch.go index e33aac3ec..b73674274 100644 --- a/agent/discovery/file/watch.go +++ b/agent/discovery/file/watch.go @@ -148,7 +148,7 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) { if group, err := parse(w.reg, file); err != nil { w.Warningf("parse '%s': %v", file, err) } else if group == nil { - groups = append(groups, &confgroup.Group{Source: file}) + groups = append(groups, &confgroup.Group{Source: file, SourceType: configSourceType(file)}) } else { groups = append(groups, group) } @@ -165,11 +165,13 @@ func (w *Watcher) refresh(ctx context.Context, in chan<- []*confgroup.Group) { for _, group := range groups { for _, cfg := range group.Configs { cfg.SetSource(group.Source) + cfg.SetSourceType(group.SourceType) cfg.SetProvider("file watcher") } } send(ctx, in, groups) + w.watchDirs() } @@ -202,7 +204,6 @@ func (w *Watcher) stop() { } }() - // in fact never returns an error _ = w.watcher.Close() } diff --git a/agent/discovery/manager.go b/agent/discovery/manager.go index 3ab1ab6af..433241958 100644 --- a/agent/discovery/manager.go +++ b/agent/discovery/manager.go @@ -56,9 +56,9 @@ func (m *Manager) String() string { return fmt.Sprintf("discovery manager: %v", m.discoverers) } -func (m *Manager) Add(d 
discoverer) { - m.discoverers = append(m.discoverers, d) -} +//func (m *Manager) Add(d discoverer) { +// m.discoverers = append(m.discoverers, d) +//} func (m *Manager) Run(ctx context.Context, in chan<- []*confgroup.Group) { m.Info("instance is started") @@ -91,7 +91,7 @@ func (m *Manager) registerDiscoverers(cfg Config) error { if err != nil { return err } - m.Add(d) + m.discoverers = append(m.discoverers, d) } if len(cfg.Dummy.Names) > 0 { @@ -100,7 +100,7 @@ func (m *Manager) registerDiscoverers(cfg Config) error { if err != nil { return err } - m.Add(d) + m.discoverers = append(m.discoverers, d) } if len(m.discoverers) == 0 { diff --git a/agent/discovery/sd/hostsocket/net.go b/agent/discovery/sd/hostsocket/net.go index 18cc35b7d..cb8aa2f41 100644 --- a/agent/discovery/sd/hostsocket/net.go +++ b/agent/discovery/sd/hostsocket/net.go @@ -12,10 +12,12 @@ import ( "os" "os/exec" "path/filepath" "strings" "time" "github.com/netdata/go.d.plugin/agent/discovery/sd/model" + "github.com/netdata/go.d.plugin/agent/executable" "github.com/netdata/go.d.plugin/logger" "github.com/ilyam8/hashstructure" @@ -56,6 +58,10 @@ func NewNetSocketDiscoverer(cfg NetworkSocketConfig) (*NetDiscoverer, error) { } dir := os.Getenv("NETDATA_PLUGINS_DIR") + if dir == "" { + dir = executable.Directory + } if dir == "" { dir, _ = os.Getwd() } @@ -128,6 +134,7 @@ func (d *NetDiscoverer) discoverLocalListeners(ctx context.Context, in chan<- [] case <-ctx.Done(): case in <- tggs: } + return nil } @@ -184,7 +191,18 @@ func (e *localListenersExec) discover(ctx context.Context) ([]byte, error) { execCtx, cancel := context.WithTimeout(ctx, e.timeout) defer cancel() - cmd := exec.CommandContext(execCtx, e.binPath, "tcp") // TODO: tcp6? + // TCPv4 and UDPv4 sockets in LISTEN state + // https://github.com/netdata/netdata/blob/master/src/collectors/plugins.d/local_listeners.c + args := []string{ + "no-udp6", + "no-tcp6", + "no-local", + "no-inbound", + "no-outbound", + "no-namespaces", + } + + cmd := exec.CommandContext(execCtx, e.binPath, args...) bs, err := cmd.Output() if err != nil { diff --git a/agent/discovery/sd/pipeline/config.go b/agent/discovery/sd/pipeline/config.go index faed30e36..9e02eced9 100644 --- a/agent/discovery/sd/pipeline/config.go +++ b/agent/discovery/sd/pipeline/config.go @@ -5,16 +5,16 @@ package pipeline import ( "errors" "fmt" - "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket" + "github.com/netdata/go.d.plugin/agent/discovery/sd/hostsocket" "github.com/netdata/go.d.plugin/agent/discovery/sd/kubernetes" ) type Config struct { Name string `yaml:"name"` - Discovery DiscoveryConfig `yaml:"discovery"` + Discovery DiscoveryConfig `yaml:"discover"` Classify []ClassifyRuleConfig `yaml:"classify"` - Compose []ComposeRuleConfig `yaml:"compose"` // TODO: "jobs"? + Compose []ComposeRuleConfig `yaml:"jobs"` } type ( diff --git a/agent/discovery/sd/pipeline/funcmap.go b/agent/discovery/sd/pipeline/funcmap.go index d49b0d3e3..3cf8de261 100644 --- a/agent/discovery/sd/pipeline/funcmap.go +++ b/agent/discovery/sd/pipeline/funcmap.go @@ -8,15 +8,19 @@ import ( "github.com/Masterminds/sprig/v3" "github.com/bmatcuk/doublestar/v4" + "github.com/netdata/go.d.plugin/pkg/matcher" ) func newFuncMap() template.FuncMap { custom := map[string]interface{}{ - "glob": globAny, - "re": regexpAny, + "match": funcMatchAny, + "glob": func(value, pattern string, patterns ...string) bool { + return funcMatchAny("glob", value, pattern, patterns...)
+ }, } fm := sprig.HermeticTxtFuncMap() + for name, fn := range custom { fm[name] = fn } @@ -24,30 +28,30 @@ func newFuncMap() template.FuncMap { return fm } -func globAny(value, pattern string, rest ...string) bool { - switch len(rest) { +func funcMatchAny(typ, value, pattern string, patterns ...string) bool { + switch len(patterns) { case 0: - return globOnce(value, pattern) + return funcMatch(typ, value, pattern) default: - return globOnce(value, pattern) || globAny(value, rest[0], rest[1:]...) + return funcMatch(typ, value, pattern) || funcMatchAny(typ, value, patterns[0], patterns[1:]...) } } -func regexpAny(value, pattern string, rest ...string) bool { - switch len(rest) { - case 0: - return regexpOnce(value, pattern) +func funcMatch(typ string, value, pattern string) bool { + switch typ { + case "glob", "": + m, err := matcher.NewGlobMatcher(pattern) + return err == nil && m.MatchString(value) + case "sp": + m, err := matcher.NewSimplePatternsMatcher(pattern) + return err == nil && m.MatchString(value) + case "re": + ok, err := regexp.MatchString(pattern, value) + return err == nil && ok + case "dstar": + ok, err := doublestar.Match(pattern, value) + return err == nil && ok default: - return regexpOnce(value, pattern) || regexpAny(value, rest[0], rest[1:]...) + return false } } - -func globOnce(value, pattern string) bool { - ok, err := doublestar.Match(pattern, value) - return err == nil && ok -} - -func regexpOnce(value, pattern string) bool { - ok, err := regexp.MatchString(pattern, value) - return err == nil && ok -} diff --git a/agent/discovery/sd/pipeline/funcmap_test.go b/agent/discovery/sd/pipeline/funcmap_test.go index c8ced5170..3de71ef70 100644 --- a/agent/discovery/sd/pipeline/funcmap_test.go +++ b/agent/discovery/sd/pipeline/funcmap_test.go @@ -3,93 +3,79 @@ package pipeline import ( - "fmt" "testing" "github.com/stretchr/testify/assert" ) -func Test_globAny(t *testing.T) { +func Test_funcMatchAny(t *testing.T) { tests := map[string]struct { + typ string patterns []string value string wantMatch bool }{ - "one param, matches": { + "dstar: one param, matches": { wantMatch: true, + typ: "dstar", patterns: []string{"*"}, value: "value", }, - "one param, matches with *": { + "dstar: one param, matches with *": { wantMatch: true, + typ: "dstar", patterns: []string{"**/value"}, value: "/one/two/three/value", }, - "one param, not matches": { + "dstar: one param, not matches": { wantMatch: false, + typ: "dstar", patterns: []string{"Value"}, value: "value", }, - "several params, last one matches": { + "dstar: several params, last one matches": { wantMatch: true, + typ: "dstar", patterns: []string{"not", "matches", "*"}, value: "value", }, - "several params, no matches": { + "dstar: several params, no matches": { wantMatch: false, + typ: "dstar", patterns: []string{"not", "matches", "really"}, value: "value", }, - } - - for name, test := range tests { - name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) - ok := globAny(test.value, test.patterns[0], test.patterns[1:]...) 
- - if test.wantMatch { - assert.Truef(t, ok, name) - } else { - assert.Falsef(t, ok, name) - } - } -} - -func Test_regexpAny(t *testing.T) { - tests := map[string]struct { - patterns []string - value string - wantMatch bool - }{ - "one param, matches": { + "re: one param, matches": { wantMatch: true, + typ: "re", patterns: []string{"^value$"}, value: "value", }, - "one param, not matches": { + "re: one param, not matches": { wantMatch: false, + typ: "re", patterns: []string{"^Value$"}, value: "value", }, - "several params, last one matches": { + "re: several params, last one matches": { wantMatch: true, + typ: "re", patterns: []string{"not", "matches", "va[lue]{3}"}, value: "value", }, - "several params, no matches": { + "re: several params, no matches": { wantMatch: false, + typ: "re", patterns: []string{"not", "matches", "val[^l]ue"}, value: "value", }, } for name, test := range tests { - name := fmt.Sprintf("name: %s, patterns: '%v', value: '%s'", name, test.patterns, test.value) - ok := regexpAny(test.value, test.patterns[0], test.patterns[1:]...) + t.Run(name, func(t *testing.T) { + ok := funcMatchAny(test.typ, test.value, test.patterns[0], test.patterns[1:]...) - if test.wantMatch { - assert.Truef(t, ok, name) - } else { - assert.Falsef(t, ok, name) - } + assert.Equal(t, test.wantMatch, ok) + }) } } diff --git a/agent/discovery/sd/pipeline/pipeline.go b/agent/discovery/sd/pipeline/pipeline.go index 1a1eb69f9..025a944b2 100644 --- a/agent/discovery/sd/pipeline/pipeline.go +++ b/agent/discovery/sd/pipeline/pipeline.go @@ -151,6 +151,7 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group { for _, cfg := range configs { cfg.SetProvider(tgg.Provider()) cfg.SetSource(tgg.Source()) + cfg.SetSourceType("discovered") } targetsCache[hash] = configs changed = true @@ -175,7 +176,11 @@ func (p *Pipeline) processGroup(tgg model.TargetGroup) *confgroup.Group { } // TODO: deepcopy? - cfgGroup := &confgroup.Group{Source: tgg.Source()} + cfgGroup := &confgroup.Group{ + Source: tgg.Source(), + SourceType: "discovered", + } + for _, cfgs := range targetsCache { cfgGroup.Configs = append(cfgGroup.Configs, cfgs...) 
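+ // note: targetsCache is a map, so the order of configs within the resulting group is not deterministic across runs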
} diff --git a/agent/discovery/sd/pipeline/pipeline_test.go b/agent/discovery/sd/pipeline/pipeline_test.go index ae6c5991a..32545dde8 100644 --- a/agent/discovery/sd/pipeline/pipeline_test.go +++ b/agent/discovery/sd/pipeline/pipeline_test.go @@ -5,6 +5,7 @@ package pipeline import ( "context" "fmt" + "github.com/bmatcuk/doublestar/v4" "strings" "testing" "time" @@ -18,6 +19,14 @@ import ( "gopkg.in/yaml.v2" ) +func TestNew2(t *testing.T) { + //cmdline := `/opt/opt/couchbase/lib/erlang/erts-13.2.2.3/bin/beam.smp -A 16 -sbt u -P 327680 -K true -swt low -sbwt none -MMmcs 30 -e102400 -S 16:16 -- -root /opt/couchbase/lib/erlang -bindir /opt/couchbase/lib/erlang/erts-13.2.2.3/bin -progname erl -- -home /tmp -- -smp enable -setcookie nocookie -kernel logger [{handler, default, undefined}] -kernel prevent_overlapping_partitions false -user user_io -run child_erlang child_start ns_bootstrap -- -smp enable -kernel logger [{handler, default, undefined}] inetrc "/opt/couchbase/etc/couchbase/hosts.cfg" dist_config_file "/opt/couchbase/var/lib/couchbase/config/dist_cfg" prevent_overlapping_partitions false -proto_dist cb -epmd_module cb_epmd -start_epmd false -ssl_dist_optfile /opt/couchbase/etc/couchbase/ssl_dist_opts -kernel global_enable_tracing false -couch_ini /opt/couchbase/etc/couchdb/default.ini /opt/couchbase/etc/couchdb/default.d/capi.ini /opt/couchbase/etc/couchdb/default.d/geocouch.ini /opt/couchbase/etc/couchdb/local.ini` + cmdline := `/opt/opt/couchbase/lib/erlang/erts-13.2.2.3/bin/beam.smp qqq` + //m, _ := matcher.NewGlobMatcher("/*/couchbase*") + //fmt.Println(m.MatchString(cmdline)) + fmt.Println(doublestar.Match("/**/couchbase**", cmdline)) +} + func TestNew(t *testing.T) { tests := map[string]struct { config string diff --git a/agent/discovery/sd/pipeline/qq.yaml b/agent/discovery/sd/pipeline/qq.yaml deleted file mode 100644 index e2ed5e402..000000000 --- a/agent/discovery/sd/pipeline/qq.yaml +++ /dev/null @@ -1,34 +0,0 @@ -name: qqq -discovery: - k8s: - - pod: - tags: "pod" - local_mode: yes - service: - tags: "service" - hostsocket: - net: - tags: "netsocket" - unix: - tags: "unixsocket" - docker: - - address: "1" - tags: "qq" - - -classify: - - name: "name" - selector: "k8s" - tags: "apps" - match: - - tags: "apache" - expr: '{{ and (eq .Port "8161") (glob .Image "**/activemq*") }}' - -compose: - - name: "Applications" - selector: "apps" - config: - - selector: "apache" - template: | - module: bind - name: bind-{{.TUID}} diff --git a/agent/functions/function.go b/agent/functions/function.go index 46a728994..c23301c4d 100644 --- a/agent/functions/function.go +++ b/agent/functions/function.go @@ -13,17 +13,20 @@ import ( ) type Function struct { - key string - UID string - Timeout time.Duration - Name string - Args []string - Payload []byte + key string + UID string + Timeout time.Duration + Name string + Args []string + Payload []byte + Permissions string + Source string + ContentType string } func (f *Function) String() string { - return fmt.Sprintf("key: %s, uid: %s, timeout: %s, function: %s, args: %v, payload: %s", - f.key, f.UID, f.Timeout, f.Name, f.Args, string(f.Payload)) + return fmt.Sprintf("key: '%s', uid: '%s', timeout: '%s', function: '%s', args: '%v', permissions: '%s', source: '%s', contentType: '%s', payload: '%s'", + f.key, f.UID, f.Timeout, f.Name, f.Args, f.Permissions, f.Source, f.ContentType, string(f.Payload)) } func parseFunction(s string) (*Function, error) { @@ -34,8 +37,9 @@ func parseFunction(s string) (*Function, error) { if err != nil { return nil, err 
} - if len(parts) != 4 { - return nil, fmt.Errorf("unexpected number of words: want 4, got %d (%v)", len(parts), parts) + + if n := len(parts); n != 6 && n != 7 { + return nil, fmt.Errorf("unexpected number of words: want 6 or 7, got %d (%v)", n, parts) } timeout, err := strconv.ParseInt(parts[2], 10, 64) @@ -43,14 +47,21 @@ return nil, err } + // 'FUNCTION_PAYLOAD 5d50db31d7e446768809b95382789257 120 \"config go.d:collector:example:jobs add example3\" \"method=api,role=god,ip=10.20.4.44\" \"text/yaml\"' cmd := strings.Split(parts[3], " ") fn := &Function{ - key: parts[0], - UID: parts[1], - Timeout: time.Duration(timeout) * time.Second, - Name: cmd[0], - Args: cmd[1:], + key: parts[0], + UID: parts[1], + Timeout: time.Duration(timeout) * time.Second, + Name: cmd[0], + Args: cmd[1:], + Permissions: parts[4], + Source: parts[5], + } + + if len(parts) == 7 { + fn.ContentType = parts[6] } return fn, nil diff --git a/agent/functions/manager.go b/agent/functions/manager.go index 760780cff..189ec4c76 100644 --- a/agent/functions/manager.go +++ b/agent/functions/manager.go @@ -5,12 +5,15 @@ package functions import ( "bufio" "context" + "fmt" "io" "log/slog" "os" "strings" "sync" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" "github.com/netdata/go.d.plugin/logger" "github.com/mattn/go-isatty" @@ -25,6 +28,7 @@ func NewManager() *Manager { slog.String("component", "functions manager"), ), Input: os.Stdin, + api: netdataapi.New(safewriter.Stdout), mux: &sync.Mutex{}, FunctionRegistry: make(map[string]func(Function)), } @@ -34,18 +38,11 @@ type Manager struct { *logger.Logger Input io.Reader + api *netdataapi.API mux *sync.Mutex FunctionRegistry map[string]func(Function) } -func (m *Manager) Register(name string, fn func(Function)) { - if fn == nil { - m.Warningf("not registering '%s': nil function", name) - return - } - m.addFunction(name, fn) -} - func (m *Manager) Run(ctx context.Context) { m.Info("instance is started") defer func() { m.Info("instance is stopped") }() @@ -102,19 +99,25 @@ func (m *Manager) run(r io.Reader) { function, ok := m.lookupFunction(fn.Name) if !ok { m.Infof("skipping execution of '%s': unregistered function", fn.Name) + m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("unregistered function: %s", fn.Name), "501") continue } if function == nil { m.Warningf("skipping execution of '%s': nil function registered", fn.Name) + m.api.FUNCRESULT(fn.UID, "application/json", jsonErrorf("nil function: %s", fn.Name), "501") continue } - m.Debugf("executing function: '%s'", fn.String()) function(*fn) } } -func (m *Manager) addFunction(name string, fn func(Function)) { +func (m *Manager) Register(name string, fn func(Function)) { + if fn == nil { + m.Warningf("not registering '%s': nil function", name) + return + } + m.mux.Lock() defer m.mux.Unlock() @@ -126,6 +129,16 @@ m.FunctionRegistry[name] = fn } +func (m *Manager) Unregister(name string) { + m.mux.Lock() + defer m.mux.Unlock() + + if _, ok := m.FunctionRegistry[name]; ok { + delete(m.FunctionRegistry, name) + m.Debugf("unregistering function '%s'", name) + } +} + func (m *Manager) lookupFunction(name string) (func(Function), bool) { m.mux.Lock() defer m.mux.Unlock() @@ -133,3 +146,10 @@ f, ok := m.FunctionRegistry[name] return f, ok } + +func jsonErrorf(format string, a ...any)
string { + msg := fmt.Sprintf(format, a...) + msg = strings.ReplaceAll(msg, "\n", " ") + + return fmt.Sprintf(`{ "error": "%s" }`+"\n", msg) +} diff --git a/agent/jobmgr/cache.go b/agent/jobmgr/cache.go index 53a1f7325..422674562 100644 --- a/agent/jobmgr/cache.go +++ b/agent/jobmgr/cache.go @@ -4,22 +4,72 @@ package jobmgr import ( "context" + "sync" "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" ) -func newRunningJobsCache() *runningJobsCache { - return &runningJobsCache{} +func newDiscoveredConfigsCache() *discoveredConfigs { + return &discoveredConfigs{ + items: make(map[string]map[uint64]confgroup.Config), + } +} + +func newSeenConfigCache() *seenConfigs { + return &seenConfigs{ + items: make(map[string]*seenConfig), + } +} + +func newExposedConfigCache() *exposedConfigs { + return &exposedConfigs{ + items: make(map[string]*seenConfig), + } } -func newRetryingJobsCache() *retryingJobsCache { - return &retryingJobsCache{} +func newRunningJobsCache() *runningJobs { + return &runningJobs{ + mux: sync.Mutex{}, + items: make(map[string]*module.Job), + } +} + +func newRetryingTasksCache() *retryingTasks { + return &retryingTasks{ + items: make(map[string]*retryTask), + } } type ( - runningJobsCache map[string]bool - retryingJobsCache map[uint64]retryTask + discoveredConfigs struct { + // [Source][Hash] + items map[string]map[uint64]confgroup.Config + } + seenConfigs struct { + // [cfg.UID()] + items map[string]*seenConfig + } + exposedConfigs struct { + // [cfg.FullName()] + items map[string]*seenConfig + } + seenConfig struct { + cfg confgroup.Config + status dyncfgStatus + } + + runningJobs struct { + mux sync.Mutex + // [cfg.FullName()] + items map[string]*module.Job + } + + retryingTasks struct { + // [cfg.UID()] + items map[string]*retryTask + } retryTask struct { cancel context.CancelFunc timeout int @@ -27,23 +77,112 @@ type ( } ) -func (c runningJobsCache) put(cfg confgroup.Config) { - c[cfg.FullName()] = true +func (c *discoveredConfigs) add(group *confgroup.Group) (added, removed []confgroup.Config) { + cfgs, ok := c.items[group.Source] + if !ok { + cfgs = make(map[uint64]confgroup.Config) + c.items[group.Source] = cfgs + } + + seen := make(map[uint64]bool) + + for _, cfg := range group.Configs { + hash := cfg.Hash() + seen[hash] = true + + if _, ok := cfgs[hash]; ok { + continue + } + + cfgs[hash] = cfg + added = append(added, cfg) + } + + for hash, cfg := range cfgs { + if !seen[hash] { + delete(cfgs, hash) + removed = append(removed, cfg) + } + } + + if len(cfgs) == 0 { + delete(c.items, group.Source) + } + + return added, removed +} + +func (c *seenConfigs) add(sj *seenConfig) { + c.items[sj.cfg.UID()] = sj +} +func (c *seenConfigs) remove(cfg confgroup.Config) { + delete(c.items, cfg.UID()) +} +func (c *seenConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) { + v, ok := c.items[cfg.UID()] + return v, ok +} + +func (c *exposedConfigs) add(sj *seenConfig) { + c.items[sj.cfg.FullName()] = sj +} +func (c *exposedConfigs) remove(cfg confgroup.Config) { + delete(c.items, cfg.FullName()) +} +func (c *exposedConfigs) lookup(cfg confgroup.Config) (*seenConfig, bool) { + v, ok := c.items[cfg.FullName()] + return v, ok } -func (c runningJobsCache) remove(cfg confgroup.Config) { - delete(c, cfg.FullName()) + +func (c *exposedConfigs) lookupByName(module, job string) (*seenConfig, bool) { + key := module + "_" + job + if module == job { + key = job + } + v, ok := c.items[key] + return v, ok +} + +func (c *runningJobs) lock() { + 
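+ // note: callers take this lock around add/remove/lookup/forEach, since running jobs are ticked, started, and stopped from different goroutines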
c.mux.Lock() +} +func (c *runningJobs) unlock() { + c.mux.Unlock() +} +func (c *runningJobs) add(fullName string, job *module.Job) { + c.items[fullName] = job } -func (c runningJobsCache) has(cfg confgroup.Config) bool { - return c[cfg.FullName()] +func (c *runningJobs) remove(fullName string) { + delete(c.items, fullName) +} +func (c *runningJobs) has(fullName string) bool { + _, ok := c.lookup(fullName) + return ok +} +func (c *runningJobs) lookup(fullName string) (*module.Job, bool) { + j, ok := c.items[fullName] + return j, ok +} +func (c *runningJobs) forEach(fn func(fullName string, job *module.Job)) { + for k, j := range c.items { + fn(k, j) + } } -func (c retryingJobsCache) put(cfg confgroup.Config, retry retryTask) { - c[cfg.Hash()] = retry +func (c *retryingTasks) add(cfg confgroup.Config, retry *retryTask) { + c.items[cfg.UID()] = retry +} +func (c *retryingTasks) remove(cfg confgroup.Config) { + if v, ok := c.lookup(cfg); ok { + v.cancel() + } + delete(c.items, cfg.UID()) } -func (c retryingJobsCache) remove(cfg confgroup.Config) { - delete(c, cfg.Hash()) +func (c *retryingTasks) has(cfg confgroup.Config) bool { + _, ok := c.items[cfg.UID()] + return ok } -func (c retryingJobsCache) lookup(cfg confgroup.Config) (retryTask, bool) { - v, ok := c[cfg.Hash()] +func (c *retryingTasks) lookup(cfg confgroup.Config) (*retryTask, bool) { + v, ok := c.items[cfg.UID()] return v, ok } diff --git a/agent/jobmgr/di.go b/agent/jobmgr/di.go index fa567b2ce..98a274877 100644 --- a/agent/jobmgr/di.go +++ b/agent/jobmgr/di.go @@ -4,6 +4,7 @@ package jobmgr import ( "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" "github.com/netdata/go.d.plugin/agent/vnodes" ) @@ -12,21 +13,27 @@ type FileLocker interface { Unlock(name string) error } -type Vnodes interface { - Lookup(key string) (*vnodes.VirtualNode, bool) -} - -type StatusSaver interface { +type FileStatus interface { Save(cfg confgroup.Config, state string) Remove(cfg confgroup.Config) } -type StatusStore interface { +type FileStatusStore interface { Contains(cfg confgroup.Config, states ...string) bool } -type Dyncfg interface { - Register(cfg confgroup.Config) - Unregister(cfg confgroup.Config) - UpdateStatus(cfg confgroup.Config, status, payload string) +type Vnodes interface { + Lookup(key string) (*vnodes.VirtualNode, bool) +} + +type FunctionRegistry interface { + Register(name string, reg func(functions.Function)) + Unregister(name string) +} + +type DyncfgAPI interface { + CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) + CONFIGDELETE(id string) + CONFIGSTATUS(id, status string) + FUNCRESULT(uid, contentType, payload, code string) } diff --git a/agent/jobmgr/dyncfg.go b/agent/jobmgr/dyncfg.go new file mode 100644 index 000000000..7aaac90e3 --- /dev/null +++ b/agent/jobmgr/dyncfg.go @@ -0,0 +1,635 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "encoding/json" + "fmt" + "log/slog" + "strconv" + "strings" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/logger" + + "gopkg.in/yaml.v2" +) + +type dyncfgStatus int + +const ( + _ dyncfgStatus = iota + dyncfgAccepted + dyncfgRunning + dyncfgFailed + dyncfgIncomplete + dyncfgDisabled +) + +func (s dyncfgStatus) String() string { + switch s { + case dyncfgAccepted: + return "accepted" + case dyncfgRunning: + return "running" + case dyncfgFailed: + return "failed" + case 
dyncfgIncomplete: + return "incomplete" + case dyncfgDisabled: + return "disabled" + default: + return "unknown" + } +} + +const ( + dyncfgIDPrefix = "go.d:collector:" + dyncfgPath = "/collectors/jobs" +) + +func dyncfgModID(name string) string { + return fmt.Sprintf("%s:%s", dyncfgIDPrefix, name) +} +func dyncfgJobID(cfg confgroup.Config) string { + return fmt.Sprintf("%s:%s:%s", dyncfgIDPrefix, cfg.Module(), cfg.Name()) +} + +func dyncfgModCmds() string { + return "add schema enable disable test" +} +func dyncfgJobCmds(cfg confgroup.Config) string { + cmds := "schema get enable disable update restart test" + if cfg.SourceType() == "dyncfg" { + cmds += " remove" + } + return cmds +} + +func (m *Manager) dyncfgModuleCreate(name string) { + id := dyncfgModID(name) + path := dyncfgPath + cmds := dyncfgModCmds() + typ := "template" + src := "internal" + m.api.CONFIGCREATE(id, dyncfgAccepted.String(), typ, path, src, src, cmds) +} + +func (m *Manager) dyncfgJobCreate(cfg confgroup.Config, status dyncfgStatus) { + id := dyncfgJobID(cfg) + path := dyncfgPath + cmds := dyncfgJobCmds(cfg) + typ := "job" + m.api.CONFIGCREATE(id, status.String(), typ, path, cfg.SourceType(), cfg.Source(), cmds) +} + +func (m *Manager) dyncfgJobRemove(cfg confgroup.Config) { + m.api.CONFIGDELETE(dyncfgJobID(cfg)) +} + +func (m *Manager) dyncfgJobStatus(cfg confgroup.Config, status dyncfgStatus) { + m.api.CONFIGSTATUS(dyncfgJobID(cfg), status.String()) +} + +func (m *Manager) dyncfgConfig(fn functions.Function) { + if len(fn.Args) < 2 { + m.Warningf("dyncfg: %s: missing required arguments, want at least 2, got %d", fn.Name, len(fn.Args)) + m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 2, but got %d.", len(fn.Args)) + return + } + + m.mux.Lock() + defer m.mux.Unlock() + + select { + case <-m.ctx.Done(): + m.dyncfgRespf(fn, 503, "Job manager is shutting down.") + return + default: + } + + action := strings.ToLower(fn.Args[1]) + + m.Debugf("dyncfg: processing action '%s': %s", action, fn) + + switch action { + case "test": + m.dyncfgConfigTest(fn) + case "schema": + m.dyncfgConfigSchema(fn) + case "get": + m.dyncfgConfigGet(fn) + case "remove": + m.dyncfgConfigRemove(fn) + case "restart": + m.dyncfgConfigRestart(fn) + case "enable": + m.dyncfgConfigEnable(fn) + case "disable": + m.dyncfgConfigDisable(fn) + case "add": + m.dyncfgConfigAdd(fn) + case "update": + m.dyncfgConfigUpdate(fn) + default: + m.Warningf("dyncfg: function '%s' not implemented", fn.String()) + m.dyncfgRespf(fn, 501, "Function '%s' is not implemented.", fn.Name) + } +} + +func (m *Manager) dyncfgConfigTest(fn functions.Function) { + id := fn.Args[0] + mn, ok := extractModuleName(id) + if !ok { + m.Warningf("dyncfg: test: could not extract module and job from id (%s)", id) + m.dyncfgRespf(fn, 400, + "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id) + return + } + + creator, ok := m.Modules.Lookup(mn) + if !ok { + m.Warningf("dyncfg: test: module %s not found", mn) + m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn) + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.Warningf("dyncfg: test: module %s: failed to create config from payload: %v", mn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration format.
Failed to create configuration from payload: %v.", err) + return + } + + cfg.SetModule(mn) + cfg.SetName("test") + + job := creator.Create() + + if err := applyConfig(cfg, job); err != nil { + m.Warningf("dyncfg: test: module %s: failed to apply config: %v", mn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err) + return + } + + job.GetBase().Logger = logger.New().With( + slog.String("collector", cfg.Module()), + slog.String("job", cfg.Name()), + ) + + defer job.Cleanup() + + if err := job.Init(); err != nil { + m.dyncfgRespf(fn, 500, "Job initialization failed: %v", err) + return + } + if err := job.Check(); err != nil { + m.dyncfgRespf(fn, 503, "Job check failed: %v", err) + return + } + + m.dyncfgRespf(fn, 200, "") +} + +func (m *Manager) dyncfgConfigSchema(fn functions.Function) { + id := fn.Args[0] + mn, ok := extractModuleName(id) + if !ok { + m.Warningf("dyncfg: schema: could not extract module from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id) + return + } + + mod, ok := m.Modules.Lookup(mn) + if !ok { + m.Warningf("dyncfg: schema: module %s not found", mn) + m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn) + return + } + + if mod.JobConfigSchema == "" { + m.Warningf("dyncfg: schema: module %s: schema not found", mn) + m.dyncfgRespf(fn, 500, "Module %s configuration schema not found.", mn) + return + } + + m.dyncfgRespPayload(fn, mod.JobConfigSchema) +} + +func (m *Manager) dyncfgConfigGet(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: get: could not extract module and job from id (%s)", id) + m.dyncfgRespf(fn, 400, + "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id) + return + } + + creator, ok := m.Modules.Lookup(mn) + if !ok { + m.Warningf("dyncfg: get: module %s not found", mn) + m.dyncfgRespf(fn, 404, "The specified module '%s' is not registered.", mn) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: get: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + mod := creator.Create() + + if err := applyConfig(ecfg.cfg, mod); err != nil { + m.Warningf("dyncfg: get: module %s job %s failed to apply config: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err) + return + } + + conf := mod.Configuration() + if conf == nil { + m.Warningf("dyncfg: get: module %s: configuration not found", mn) + m.dyncfgRespf(fn, 500, "Module %s does not provide configuration.", mn) + return + } + + bs, err := json.Marshal(conf) + if err != nil { + m.Warningf("dyncfg: get: module %s job %s failed marshal config: %v", mn, jn, err) + m.dyncfgRespf(fn, 500, "Failed to convert configuration into JSON: %v.", err) + return + } + + m.dyncfgRespPayload(fn, string(bs)) +} + +func (m *Manager) dyncfgConfigAdd(fn functions.Function) { + if len(fn.Args) < 3 { + m.Warningf("dyncfg: add: missing required arguments, want 3 got %d", len(fn.Args)) + m.dyncfgRespf(fn, 400, "Missing required arguments. Need at least 3, but got %d.", len(fn.Args)) + return + } + + id := fn.Args[0] + jn := fn.Args[2] + mn, ok := extractModuleName(id) + if !ok { + m.Warningf("dyncfg: add: could not extract module from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. 
Could not extract module name from ID. Provided ID: %s.", id) + return + } + + if len(fn.Payload) == 0 { + m.Warningf("dyncfg: add: module %s job %s missing configuration payload.", mn, jn) + m.dyncfgRespf(fn, 400, "Missing configuration payload.") + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.Warningf("dyncfg: add: module %s job %s: failed to create config from payload: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err) + return + } + + m.dyncfgSetConfigMeta(cfg, mn, jn) + + scfg := &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) + + ecfg, ok := m.exposedConfigs.lookup(cfg) + if ok { + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + } + ecfg = scfg + m.exposedConfigs.add(ecfg) + + if _, err := m.createCollectorJob(ecfg.cfg); err != nil { + // TODO: remove from exposed + ecfg.status = dyncfgFailed + m.Warningf("dyncfg: add: module %s job %s: failed to apply config: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + ecfg.status = dyncfgAccepted + m.dyncfgRespf(fn, 202, "") + m.dyncfgJobCreate(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigRemove(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: remove: could not extract module and job from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: remove: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + if ecfg.cfg.SourceType() != "dyncfg" { + m.Warningf("dyncfg: remove: module %s job %s: can not remove jobs of type %s", mn, jn, ecfg.cfg.SourceType()) + m.dyncfgRespf(fn, 405, "Removing jobs of type '%s' is not supported. Only 'dyncfg' jobs can be removed.", ecfg.cfg.SourceType()) + return + } + + m.seenConfigs.remove(ecfg.cfg) + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobRemove(ecfg.cfg) +} + +func (m *Manager) dyncfgConfigRestart(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: restart: could not extract module from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: restart: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + job, err := m.createCollectorJob(ecfg.cfg) + if err != nil { + m.Warningf("dyncfg: restart: module %s job %s: failed to apply config: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. 
Failed to apply configuration: %v.", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + switch ecfg.status { + case dyncfgAccepted, dyncfgDisabled: + m.Warningf("dyncfg: restart: module %s job %s: restarting not allowed in %s", mn, jn, ecfg.status) + m.dyncfgRespf(fn, 405, "Restarting data collection job is not allowed in '%s' state.", ecfg.status) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + case dyncfgRunning: + m.stopRunningJob(ecfg.cfg.FullName()) + default: + } + + if err := job.AutoDetection(); err != nil { + job.Cleanup() + ecfg.status = dyncfgFailed + m.dyncfgRespf(fn, 503, "Job restart failed: %v", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + m.startRunningJob(job) + ecfg.status = dyncfgRunning + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigEnable(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: enable: could not extract module and job from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module and job name from ID. Provided ID: %s.", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: enable: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + switch ecfg.status { + case dyncfgAccepted, dyncfgDisabled: + default: + // todo: now allowed + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + job, err := m.createCollectorJob(ecfg.cfg) + if err != nil { + ecfg.status = dyncfgFailed + m.Warningf("dyncfg: enable: module %s job %s: failed to apply config: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + // TODO: retry + if err := job.AutoDetection(); err != nil { + job.Cleanup() + ecfg.status = dyncfgFailed + m.dyncfgRespf(fn, 200, "Job enable failed: %v", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + ecfg.status = dyncfgRunning + m.startRunningJob(job) + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + +} + +func (m *Manager) dyncfgConfigDisable(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: disable: could not extract module from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. Provided ID: %s.", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: disable: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + switch ecfg.status { + case dyncfgDisabled: + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + case dyncfgRunning: + m.stopRunningJob(ecfg.cfg.FullName()) + default: + } + + ecfg.status = dyncfgDisabled + + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgConfigUpdate(fn functions.Function) { + id := fn.Args[0] + mn, jn, ok := extractModuleJobName(id) + if !ok { + m.Warningf("dyncfg: update: could not extract module from id (%s)", id) + m.dyncfgRespf(fn, 400, "Invalid ID format. Could not extract module name from ID. 
Provided ID: %s.", id) + return + } + + ecfg, ok := m.exposedConfigs.lookupByName(mn, jn) + if !ok { + m.Warningf("dyncfg: update: module %s job %s not found", mn, jn) + m.dyncfgRespf(fn, 404, "The specified module '%s' job '%s' is not registered.", mn, jn) + return + } + + cfg, err := configFromPayload(fn) + if err != nil { + m.Warningf("dyncfg: update: module %s: failed to create config from payload: %v", mn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration format. Failed to create configuration from payload: %v.", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + m.dyncfgSetConfigMeta(cfg, mn, jn) + + if ecfg.status == dyncfgRunning && ecfg.cfg.UID() == cfg.UID() { + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + job, err := m.createCollectorJob(cfg) + if err != nil { + m.Warningf("dyncfg: update: module %s job %s: failed to apply config: %v", mn, jn, err) + m.dyncfgRespf(fn, 400, "Invalid configuration. Failed to apply configuration: %v.", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + if ecfg.status == dyncfgAccepted { + m.Warningf("dyncfg: update: module %s job %s: updating not allowed in %s", mn, jn, ecfg.status) + m.dyncfgRespf(fn, 403, "Updating data collection job is not allowed in current state: '%s'.", ecfg.status) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + if ecfg.cfg.SourceType() == "dyncfg" { + m.seenConfigs.remove(ecfg.cfg) + } + m.exposedConfigs.remove(ecfg.cfg) + m.stopRunningJob(ecfg.cfg.FullName()) + + scfg := &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) + m.exposedConfigs.add(scfg) + + if ecfg.status == dyncfgDisabled { + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(cfg, scfg.status) + return + } + + if err := job.AutoDetection(); err != nil { + job.Cleanup() + ecfg.status = dyncfgFailed + m.dyncfgRespf(fn, 200, "Job update failed: %v", err) + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) + return + } + + ecfg.status = dyncfgRunning + m.startRunningJob(job) + m.dyncfgRespf(fn, 200, "") + m.dyncfgJobStatus(ecfg.cfg, ecfg.status) +} + +func (m *Manager) dyncfgSetConfigMeta(cfg confgroup.Config, module, name string) { + cfg.SetProvider("dyncfg") + cfg.SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, name)) + cfg.SetSourceType("dyncfg") + cfg.SetModule(module) + cfg.SetName(name) + if def, ok := m.ConfigDefaults.Lookup(module); ok { + cfg.ApplyDefaults(def) + } +} + +func (m *Manager) dyncfgRespPayload(fn functions.Function, payload string) { + m.api.FUNCRESULT(fn.UID, "application/json", payload, "200") +} + +func (m *Manager) dyncfgRespf(fn functions.Function, code int, msgf string, a ...any) { + if fn.UID == "" { + return + } + bs, _ := json.Marshal(struct { + Status int `json:"status"` + Message string `json:"message"` + }{ + Status: code, + Message: fmt.Sprintf(msgf, a...), + }) + m.api.FUNCRESULT(fn.UID, "application/json", string(bs), strconv.Itoa(code)) +} + +func configFromPayload(fn functions.Function) (confgroup.Config, error) { + var cfg confgroup.Config + + if fn.ContentType != "application/json" { + if err := yaml.Unmarshal(fn.Payload, &cfg); err != nil { + return nil, err + } + + return cfg, nil + } + + if err := json.Unmarshal(fn.Payload, &cfg); err != nil { + return nil, err + } + + return cfg.Clone() +} + +func extractModuleJobName(id string) (mn string, jn string, ok bool) { + if mn, ok = extractModuleName(id); !ok { + return "", "", false + } + if jn, ok = extractJobName(id); !ok { + return "", "", false + } + return mn, jn, true +} + +func 
extractModuleName(id string) (string, bool) { + id = strings.TrimPrefix(id, dyncfgIDPrefix) + i := strings.IndexByte(id, ':') + if i == -1 { + return id, id != "" + } + return id[:i], true +} + +func extractJobName(id string) (string, bool) { + i := strings.LastIndexByte(id, ':') + if i == -1 { + return "", false + } + return id[i+1:], true +} diff --git a/agent/jobmgr/manager.go b/agent/jobmgr/manager.go index 7088f84f9..4e6e2fe1a 100644 --- a/agent/jobmgr/manager.go +++ b/agent/jobmgr/manager.go @@ -13,57 +13,40 @@ import ( "time" "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/functions" "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/netdata/go.d.plugin/agent/ticker" "github.com/netdata/go.d.plugin/logger" + "github.com/mattn/go-isatty" "gopkg.in/yaml.v2" ) -type Job interface { - Name() string - ModuleName() string - FullName() string - AutoDetection() bool - AutoDetectionEvery() int - RetryAutoDetection() bool - Tick(clock int) - Start() - Stop() - Cleanup() -} - -type jobStatus = string - -const ( - jobStatusRunning jobStatus = "running" // Check() succeeded - jobStatusRetrying jobStatus = "retrying" // Check() failed, but we need keep trying auto-detection - jobStatusStoppedFailed jobStatus = "stopped_failed" // Check() failed - jobStatusStoppedDupLocal jobStatus = "stopped_duplicate_local" // a job with the same FullName is running - jobStatusStoppedDupGlobal jobStatus = "stopped_duplicate_global" // a job with the same FullName is registered by another plugin - jobStatusStoppedRegErr jobStatus = "stopped_registration_error" // an error during registration (only 'too many open files') - jobStatusStoppedCreateErr jobStatus = "stopped_creation_error" // an error during creation (yaml unmarshal) -) +var isTerminal = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsTerminal(os.Stdin.Fd()) -func NewManager() *Manager { - np := noop{} +func New() *Manager { mgr := &Manager{ Logger: logger.New().With( slog.String("component", "job manager"), ), - Out: io.Discard, - FileLock: np, - StatusSaver: np, - StatusStore: np, - Vnodes: np, - Dyncfg: np, - - confGroupCache: confgroup.NewCache(), - - runningJobs: newRunningJobsCache(), - retryingJobs: newRetryingJobsCache(), - - addCh: make(chan confgroup.Config), - removeCh: make(chan confgroup.Config), + Out: io.Discard, + FileLock: noop{}, + FileStatus: noop{}, + FileStatusStore: noop{}, + Vnodes: noop{}, + FnReg: noop{}, + + discoveredConfigs: newDiscoveredConfigsCache(), + seenConfigs: newSeenConfigCache(), + exposedConfigs: newExposedConfigCache(), + runningJobs: newRunningJobsCache(), + retryingTasks: newRetryingTasksCache(), + + api: netdataapi.New(safewriter.Stdout), + mux: sync.Mutex{}, + started: make(chan struct{}), } return mgr @@ -72,210 +55,245 @@ func NewManager() *Manager { type Manager struct { *logger.Logger - PluginName string - Out io.Writer - Modules module.Registry + PluginName string + Out io.Writer + Modules module.Registry + ConfigDefaults confgroup.Registry - FileLock FileLocker - StatusSaver StatusSaver - StatusStore StatusStore - Vnodes Vnodes - Dyncfg Dyncfg + FileLock FileLocker + FileStatus FileStatus + FileStatusStore FileStatusStore + Vnodes Vnodes + FnReg FunctionRegistry - confGroupCache *confgroup.Cache - runningJobs *runningJobsCache - retryingJobs *retryingJobsCache + discoveredConfigs *discoveredConfigs + seenConfigs *seenConfigs + exposedConfigs 
*exposedConfigs + retryingTasks *retryingTasks + runningJobs *runningJobs - addCh chan confgroup.Config - removeCh chan confgroup.Config + api DyncfgAPI + ctx context.Context + mux sync.Mutex - queueMux sync.Mutex - queue []Job + started chan struct{} } func (m *Manager) Run(ctx context.Context, in chan []*confgroup.Group) { m.Info("instance is started") defer func() { m.cleanup(); m.Info("instance is stopped") }() + m.ctx = ctx + + m.FnReg.Register("config", m.dyncfgConfig) + + for name := range m.Modules { + m.dyncfgModuleCreate(name) + } var wg sync.WaitGroup wg.Add(1) - go func() { defer wg.Done(); m.runConfigGroupsHandling(ctx, in) }() + go func() { defer wg.Done(); m.runProcessDiscoveredConfigs(ctx, in) }() wg.Add(1) - go func() { defer wg.Done(); m.runConfigsHandling(ctx) }() + go func() { defer wg.Done(); m.runNotifyRunningJobs(ctx) }() - wg.Add(1) - go func() { defer wg.Done(); m.runRunningJobsHandling(ctx) }() + close(m.started) wg.Wait() <-ctx.Done() } -func (m *Manager) runConfigGroupsHandling(ctx context.Context, in chan []*confgroup.Group) { +func (m *Manager) runProcessDiscoveredConfigs(ctx context.Context, in chan []*confgroup.Group) { for { select { case <-ctx.Done(): return case groups := <-in: - for _, gr := range groups { - select { - case <-ctx.Done(): - return - default: - a, r := m.confGroupCache.Add(gr) - m.Debugf("received config group ('%s'): %d jobs (added: %d, removed: %d)", gr.Source, len(gr.Configs), len(a), len(r)) - sendConfigs(ctx, m.removeCh, r) - sendConfigs(ctx, m.addCh, a) - } - } + m.processDiscoveredConfigGroups(groups) } } } -func (m *Manager) runConfigsHandling(ctx context.Context) { - for { - select { - case <-ctx.Done(): - return - case cfg := <-m.addCh: - m.addConfig(ctx, cfg) - case cfg := <-m.removeCh: - m.removeConfig(cfg) +func (m *Manager) processDiscoveredConfigGroups(groups []*confgroup.Group) { + for _, gr := range groups { + a, r := m.discoveredConfigs.add(gr) + m.Debugf("received configs: %d/+%d/-%d (group '%s')", len(gr.Configs), len(a), len(r), gr.Source) + for _, cfg := range r { + m.removeDiscoveredConfig(cfg) + } + for _, cfg := range a { + m.addDiscoveredConfig(cfg) } } } -func (m *Manager) cleanup() { - for _, task := range *m.retryingJobs { - task.cancel() - } - for name := range *m.runningJobs { - _ = m.FileLock.Unlock(name) +func (m *Manager) addDiscoveredConfig(cfg confgroup.Config) { + m.mux.Lock() + defer m.mux.Unlock() + + scfg, ok := m.seenConfigs.lookup(cfg) + if !ok { + scfg = &seenConfig{cfg: cfg} + m.seenConfigs.add(scfg) } - // TODO: m.Dyncfg.Register() ? 
- m.stopRunningJobs() -} -func (m *Manager) addConfig(ctx context.Context, cfg confgroup.Config) { - task, isRetry := m.retryingJobs.lookup(cfg) - if isRetry { - task.cancel() - m.retryingJobs.remove(cfg) - } else { - m.Dyncfg.Register(cfg) + ecfg, ok := m.exposedConfigs.lookup(cfg) + if !ok { + ecfg = scfg + m.exposedConfigs.add(ecfg) } - if m.runningJobs.has(cfg) { - m.Infof("%s[%s] job is being served by another job, skipping it", cfg.Module(), cfg.Name()) - m.StatusSaver.Save(cfg, jobStatusStoppedDupLocal) - m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another job") + if !ok { + if _, err := m.createCollectorJob(cfg); err != nil { + ecfg.status = dyncfgFailed + m.dyncfgJobCreate(cfg, ecfg.status) + return + } + + ecfg.status = dyncfgAccepted + m.dyncfgJobCreate(cfg, ecfg.status) + if isTerminal { + m.dyncfgConfigEnable(functions.Function{ + Args: []string{dyncfgJobID(ecfg.cfg)}, + }) + } return } - job, err := m.createJob(cfg) - if err != nil { - m.Warningf("couldn't create %s[%s]: %v", cfg.Module(), cfg.Name(), err) - m.StatusSaver.Save(cfg, jobStatusStoppedCreateErr) - m.Dyncfg.UpdateStatus(cfg, "error", fmt.Sprintf("build error: %s", err)) + // TODO: fix retry + if scfg.cfg.UID() == ecfg.cfg.UID() { return } - cleanupJob := true - defer func() { - if cleanupJob { - job.Cleanup() - } - }() - - if isRetry { - job.AutoDetectEvery = task.timeout - job.AutoDetectTries = task.retries - } else if job.AutoDetectionEvery() == 0 { - switch { - case m.StatusStore.Contains(cfg, jobStatusRunning, jobStatusRetrying): - m.Infof("%s[%s] job last status is running/retrying, applying recovering settings", cfg.Module(), cfg.Name()) - job.AutoDetectEvery = 30 - job.AutoDetectTries = 11 - case isInsideK8sCluster() && cfg.Provider() == "file watcher": - m.Infof("%s[%s] is k8s job, applying recovering settings", cfg.Module(), cfg.Name()) - job.AutoDetectEvery = 10 - job.AutoDetectTries = 7 - } + sp, ep := scfg.cfg.SourceTypePriority(), ecfg.cfg.SourceTypePriority() + + if ep > sp || (ep == sp && ecfg.status == dyncfgRunning) { + return + } + if ep < sp { + m.stopRunningJob(ecfg.cfg.FullName()) + m.exposedConfigs.add(scfg) // replace + ecfg = scfg } - switch detection(job) { - case jobStatusRunning: - if ok, err := m.FileLock.Lock(cfg.FullName()); ok || err != nil && !isTooManyOpenFiles(err) { - cleanupJob = false - m.runningJobs.put(cfg) - m.StatusSaver.Save(cfg, jobStatusRunning) - m.Dyncfg.UpdateStatus(cfg, "running", "") - m.startJob(job) - } else if isTooManyOpenFiles(err) { - m.Error(err) - m.StatusSaver.Save(cfg, jobStatusStoppedRegErr) - m.Dyncfg.UpdateStatus(cfg, "error", "too many open files") - } else { - m.Infof("%s[%s] job is being served by another plugin, skipping it", cfg.Module(), cfg.Name()) - m.StatusSaver.Save(cfg, jobStatusStoppedDupGlobal) - m.Dyncfg.UpdateStatus(cfg, "error", "duplicate, served by another plugin") - } - case jobStatusRetrying: - m.Infof("%s[%s] job detection failed, will retry in %d seconds", cfg.Module(), cfg.Name(), job.AutoDetectionEvery()) - ctx, cancel := context.WithCancel(ctx) - m.retryingJobs.put(cfg, retryTask{ - cancel: cancel, - timeout: job.AutoDetectionEvery(), - retries: job.AutoDetectTries, + if isTerminal { + m.dyncfgConfigEnable(functions.Function{ + Args: []string{dyncfgJobID(ecfg.cfg)}, }) - go runRetryTask(ctx, m.addCh, cfg, time.Second*time.Duration(job.AutoDetectionEvery())) - m.StatusSaver.Save(cfg, jobStatusRetrying) - m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, will retry later") - case jobStatusStoppedFailed: - 
m.StatusSaver.Save(cfg, jobStatusStoppedFailed) - m.Dyncfg.UpdateStatus(cfg, "error", "job detection failed, stopping it") - default: - m.Warningf("%s[%s] job detection: unknown state", cfg.Module(), cfg.Name()) } + return } -func (m *Manager) removeConfig(cfg confgroup.Config) { - if m.runningJobs.has(cfg) { - m.stopJob(cfg.FullName()) - _ = m.FileLock.Unlock(cfg.FullName()) - m.runningJobs.remove(cfg) +func (m *Manager) removeDiscoveredConfig(cfg confgroup.Config) { + m.mux.Lock() + defer m.mux.Unlock() + + m.retryingTasks.remove(cfg) + + scfg, ok := m.seenConfigs.lookup(cfg) + if !ok { + return } + m.seenConfigs.remove(cfg) - if task, ok := m.retryingJobs.lookup(cfg); ok { - task.cancel() - m.retryingJobs.remove(cfg) + ecfg, ok := m.exposedConfigs.lookup(cfg) + if !ok { + return + } + if scfg.cfg.UID() == ecfg.cfg.UID() { + m.exposedConfigs.remove(cfg) + m.stopRunningJob(cfg.FullName()) + m.dyncfgJobRemove(cfg) } - m.StatusSaver.Remove(cfg) - m.Dyncfg.Unregister(cfg) + return } -func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { +func (m *Manager) runNotifyRunningJobs(ctx context.Context) { + tk := ticker.New(time.Second) + defer tk.Stop() + + for { + select { + case <-ctx.Done(): + return + case clock := <-tk.C: + m.runningJobs.lock() + m.runningJobs.forEach(func(_ string, job *module.Job) { + job.Tick(clock) + }) + m.runningJobs.unlock() + } + } +} + +func (m *Manager) cleanup() { + m.mux.Lock() + defer m.mux.Unlock() + + m.FnReg.Unregister("config") + + m.runningJobs.lock() + defer m.runningJobs.unlock() + + m.runningJobs.forEach(func(key string, job *module.Job) { + job.Stop() + m.runningJobs.remove(key) + }) +} + +func (m *Manager) startRunningJob(job *module.Job) { + m.runningJobs.lock() + defer m.runningJobs.unlock() + + if job, ok := m.runningJobs.lookup(job.FullName()); ok { + job.Stop() + } + + go job.Start() + m.runningJobs.add(job.FullName(), job) +} + +func (m *Manager) stopRunningJob(name string) { + m.runningJobs.lock() + defer m.runningJobs.unlock() + + if job, ok := m.runningJobs.lookup(name); ok { + job.Stop() + m.runningJobs.remove(name) + } +} + +func (m *Manager) createCollectorJob(cfg confgroup.Config) (*module.Job, error) { creator, ok := m.Modules[cfg.Module()] if !ok { return nil, fmt.Errorf("can not find %s module", cfg.Module()) } + var vnode struct { + guid string + hostname string + labels map[string]string + } + + if cfg.Vnode() != "" { + n, ok := m.Vnodes.Lookup(cfg.Vnode()) + if !ok { + return nil, fmt.Errorf("vnode '%s' is not found", cfg.Vnode()) + } + + vnode.guid = n.GUID + vnode.hostname = n.Hostname + vnode.labels = n.Labels + } + m.Debugf("creating %s[%s] job, config: %v", cfg.Module(), cfg.Name(), cfg) mod := creator.Create() - if err := unmarshal(cfg, mod); err != nil { - return nil, err - } - labels := make(map[string]string) - for name, value := range cfg.Labels() { - n, ok1 := name.(string) - v, ok2 := value.(string) - if ok1 && ok2 { - labels[n] = v - } + if err := applyConfig(cfg, mod); err != nil { + return nil, err } jobCfg := module.JobConfig{ @@ -286,21 +304,13 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { UpdateEvery: cfg.UpdateEvery(), AutoDetectEvery: cfg.AutoDetectionRetry(), Priority: cfg.Priority(), - Labels: labels, - IsStock: isStockConfig(cfg), + Labels: makeLabels(cfg), + IsStock: cfg.SourceType() == "stock", Module: mod, Out: m.Out, - } - - if cfg.Vnode() != "" { - n, ok := m.Vnodes.Lookup(cfg.Vnode()) - if !ok { - return nil, fmt.Errorf("vnode '%s' is not found", 
cfg.Vnode()) - } - - jobCfg.VnodeGUID = n.GUID - jobCfg.VnodeHostname = n.Hostname - jobCfg.VnodeLabels = n.Labels + VnodeGUID: vnode.guid, + VnodeHostname: vnode.hostname, + VnodeLabels: vnode.labels, } job := module.NewJob(jobCfg) @@ -308,62 +318,31 @@ func (m *Manager) createJob(cfg confgroup.Config) (*module.Job, error) { return job, nil } -func detection(job Job) jobStatus { - if !job.AutoDetection() { - if job.RetryAutoDetection() { - return jobStatusRetrying - } else { - return jobStatusStoppedFailed - } - } - return jobStatusRunning -} - -func runRetryTask(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config, timeout time.Duration) { - t := time.NewTimer(timeout) - defer t.Stop() - - select { - case <-ctx.Done(): - case <-t.C: - sendConfig(ctx, out, cfg) - } -} - -func sendConfigs(ctx context.Context, out chan<- confgroup.Config, cfgs []confgroup.Config) { - for _, cfg := range cfgs { - sendConfig(ctx, out, cfg) - } -} - -func sendConfig(ctx context.Context, out chan<- confgroup.Config, cfg confgroup.Config) { - select { - case <-ctx.Done(): - return - case out <- cfg: - } -} - -func unmarshal(conf interface{}, module interface{}) error { - bs, err := yaml.Marshal(conf) +func applyConfig(cfg confgroup.Config, module any) error { + bs, err := yaml.Marshal(cfg) if err != nil { return err } return yaml.Unmarshal(bs, module) } +func isTooManyOpenFiles(err error) bool { + return err != nil && strings.Contains(err.Error(), "too many open files") +} + func isInsideK8sCluster() bool { host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") return host != "" && port != "" } -func isTooManyOpenFiles(err error) bool { - return err != nil && strings.Contains(err.Error(), "too many open files") -} - -func isStockConfig(cfg confgroup.Config) bool { - if !strings.HasPrefix(cfg.Provider(), "file") { - return false +func makeLabels(cfg confgroup.Config) map[string]string { + labels := make(map[string]string) + for name, value := range cfg.Labels() { + n, ok1 := name.(string) + v, ok2 := value.(string) + if ok1 && ok2 { + labels[n] = v + } } - return !strings.Contains(cfg.Source(), "/etc/netdata") + return labels } diff --git a/agent/jobmgr/manager_test.go b/agent/jobmgr/manager_test.go index 69dceda49..86e89cacb 100644 --- a/agent/jobmgr/manager_test.go +++ b/agent/jobmgr/manager_test.go @@ -3,102 +3,1251 @@ package jobmgr import ( - "bytes" - "context" - "sync" + "encoding/json" + "fmt" "testing" - "time" "github.com/netdata/go.d.plugin/agent/confgroup" - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/agent/safewriter" - "github.com/stretchr/testify/assert" + "github.com/netdata/go.d.plugin/agent/functions" ) -// TODO: tech dept -func TestNewManager(t *testing.T) { +func TestManager_Run_Dyncfg_Get(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[get] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-get", + Args: []string{dyncfgJobID(cfg), "get"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-get 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[get] existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test"). 
+ Set("option_str", "1"). + Set("option_int", 1) + bs, _ := json.Marshal(cfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: bs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-get", + Args: []string{dyncfgJobID(cfg), "get"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 202 application/json 202 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-get 200 application/json 200 +{"option_str":"1","option_int":1} +FUNCTION_RESULT_END +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Add(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[add] dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } + }, + }, + "[add] dyncfg:nok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted +`, + } + }, + }, + "[add] dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := 
test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Enable(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[enable] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-enable 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[enable] dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "[enable] dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "[enable] dyncfg:nok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 
application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed +`, + } + }, + }, + "[enable] dyncfg:nok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgFailed}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed + +FUNCTION_RESULT_BEGIN 3-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status failed +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Disable(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[disable] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-disable 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[disable] dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "[disable] dyncfg:ok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", 
cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "[disable] dyncfg:nok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled +`, + } + }, + }, + "[disable] dyncfg:nok twice": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("fail", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:fail:jobs:test status disabled +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Restart(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[restart] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + 
do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-restart 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[restart] not enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-restart 403 application/json 403 +{"status":403,"message":"Restarting data collection job is not allowed in 'accepted' state."} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted +`, + } + }, + }, + "[restart] enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "[restart] disabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(cfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + 
+CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-restart 403 application/json 403 +{"status":403,"message":"Restarting data collection job is not allowed in 'disabled' state."} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + "[restart] enabled dyncfg:ok multiple times": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "4-restart", + Args: []string{dyncfgJobID(cfg), "restart"}, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: cfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 4-restart 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } +} + +func TestManager_Run_Dyncfg_Remove(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[remove] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-remove 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[remove] non-dyncfg": { + createSim: func() *runSim { + stockCfg := prepareStockCfg("success", "stock") + userCfg := prepareUserCfg("success", "user") + discCfg := prepareDiscoveredCfg("success", "discovered") + + return &runSim{ + do: func(mgr *Manager) { + mgr.processDiscoveredConfigGroups([]*confgroup.Group{ + prepareCfgGroup(stockCfg.Source(), "stock", stockCfg), + prepareCfgGroup(userCfg.Source(), "user", userCfg), + prepareCfgGroup(discCfg.Source(), "discovered", discCfg), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "1-remove", + Args: []string{dyncfgJobID(stockCfg), "remove"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-remove", + Args: []string{dyncfgJobID(userCfg), "remove"}, + }) + 
mgr.dyncfgConfig(functions.Function{ + UID: "3-remove", + Args: []string{dyncfgJobID(discCfg), "remove"}, + }) + }, + wantDiscovered: []confgroup.Config{ + stockCfg, + userCfg, + discCfg, + }, + wantSeen: []seenConfig{ + {cfg: stockCfg, status: dyncfgAccepted}, + {cfg: userCfg, status: dyncfgAccepted}, + {cfg: discCfg, status: dyncfgAccepted}, + }, + wantExposed: []seenConfig{ + {cfg: stockCfg, status: dyncfgAccepted}, + {cfg: userCfg, status: dyncfgAccepted}, + {cfg: discCfg, status: dyncfgAccepted}, + }, + wantRunning: nil, + wantDyncfg: ` +CONFIG go.d:collector:success:jobs:stock create accepted job /collectors/success stock 'type=stock,module=success,job=stock' 'schema get enable disable update restart' 0x0000 0x0000 + +CONFIG go.d:collector:success:jobs:user create accepted job /collectors/success user 'type=user,module=success,job=user' 'schema get enable disable update restart' 0x0000 0x0000 + +CONFIG go.d:collector:success:jobs:discovered create accepted job /collectors/success discovered 'type=discovered,module=success,job=discovered' 'schema get enable disable update restart' 0x0000 0x0000 + +FUNCTION_RESULT_BEGIN 1-remove 405 application/json 405 +{"status":405,"message":"Removing jobs of type 'stock' is not supported. Only 'dyncfg' jobs can be removed."} +FUNCTION_RESULT_END + +FUNCTION_RESULT_BEGIN 2-remove 405 application/json 405 +{"status":405,"message":"Removing jobs of type 'user' is not supported. Only 'dyncfg' jobs can be removed."} +FUNCTION_RESULT_END + +FUNCTION_RESULT_BEGIN 3-remove 405 application/json 405 +{"status":405,"message":"Removing jobs of type 'discovered' is not supported. Only 'dyncfg' jobs can be removed."} +FUNCTION_RESULT_END +`, + } + }, + }, + "[remove] not enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-remove 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test delete +`, + } + }, + }, + "[remove] enabled dyncfg:ok": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(cfg.Module()), "add", cfg.Name()}, + Payload: []byte("{}"), + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(cfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-remove", + Args: []string{dyncfgJobID(cfg), "remove"}, + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 
3-remove 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test delete +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) + } } -// TODO: tech dept -func TestManager_Run(t *testing.T) { - groups := []*confgroup.Group{ - { - Source: "source", - Configs: []confgroup.Config{ - { - "name": "name", - "module": "success", - "update_every": module.UpdateEvery, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, - { - "name": "name", - "module": "success", - "update_every": module.UpdateEvery + 1, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, - { - "name": "name", - "module": "fail", - "update_every": module.UpdateEvery + 1, - "autodetection_retry": module.AutoDetectionRetry, - "priority": module.Priority, - }, +func TestManager_Run_Dyncfg_Update(t *testing.T) { + tests := map[string]struct { + createSim func() *runSim + }{ + "[update] non-existing": { + createSim: func() *runSim { + cfg := prepareDyncfgCfg("success", "test") + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-update", + Args: []string{dyncfgJobID(cfg), "update"}, + Payload: []byte("{}"), + }) + }, + wantDiscovered: nil, + wantSeen: nil, + wantExposed: nil, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-update 404 application/json 404 +{"status":404,"message":"The specified module 'success' job 'test' is not registered."} +FUNCTION_RESULT_END +`, + } }, }, + "[update] enabled dyncfg:ok with dyncfg:ok": { + createSim: func() *runSim { + origCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "1") + updCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "2") + origBs, _ := json.Marshal(origCfg) + updBs, _ := json.Marshal(updCfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()}, + Payload: origBs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-enable", + Args: []string{dyncfgJobID(origCfg), "enable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-update", + Args: []string{dyncfgJobID(origCfg), "update"}, + Payload: updBs, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: updCfg, status: dyncfgRunning}, + }, + wantExposed: []seenConfig{ + {cfg: updCfg, status: dyncfgRunning}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-enable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running + +FUNCTION_RESULT_BEGIN 3-update 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status running +`, + } + }, + }, + "[update] disabled dyncfg:ok with dyncfg:ok": { + createSim: func() *runSim { + origCfg := prepareDyncfgCfg("success", "test"). + Set("option_str", "1") + updCfg := prepareDyncfgCfg("success", "test"). 
+ Set("option_str", "2") + origBs, _ := json.Marshal(origCfg) + updBs, _ := json.Marshal(updCfg) + + return &runSim{ + do: func(mgr *Manager) { + mgr.dyncfgConfig(functions.Function{ + UID: "1-add", + Args: []string{dyncfgModID(origCfg.Module()), "add", origCfg.Name()}, + Payload: origBs, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "2-disable", + Args: []string{dyncfgJobID(origCfg), "disable"}, + }) + mgr.dyncfgConfig(functions.Function{ + UID: "3-update", + Args: []string{dyncfgJobID(origCfg), "update"}, + Payload: updBs, + }) + }, + wantDiscovered: nil, + wantSeen: []seenConfig{ + {cfg: updCfg, status: dyncfgDisabled}, + }, + wantExposed: []seenConfig{ + {cfg: updCfg, status: dyncfgDisabled}, + }, + wantRunning: nil, + wantDyncfg: ` +FUNCTION_RESULT_BEGIN 1-add 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status accepted + +FUNCTION_RESULT_BEGIN 2-disable 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled + +FUNCTION_RESULT_BEGIN 3-update 200 application/json 200 +{"status":200,"message":""} +FUNCTION_RESULT_END + +CONFIG go.d:collector:success:jobs:test status disabled +`, + } + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + sim := test.createSim() + sim.run(t) + }) } - var buf bytes.Buffer - mgr := NewManager() - mgr.Modules = prepareMockRegistry() - mgr.Out = safewriter.New(&buf) - mgr.PluginName = "test.plugin" - - ctx, cancel := context.WithCancel(context.Background()) - in := make(chan []*confgroup.Group) - var wg sync.WaitGroup - - wg.Add(1) - go func() { defer wg.Done(); mgr.Run(ctx, in) }() - - select { - case in <- groups: - case <-time.After(time.Second * 2): +} + +func prepareCfgGroup(src, srcType string, configs ...confgroup.Config) *confgroup.Group { + return &confgroup.Group{ + Configs: configs, + Source: src, + SourceType: srcType, } +} - time.Sleep(time.Second * 5) - cancel() - wg.Wait() +func prepareStockCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("stock"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=stock,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) +} + +func prepareUserCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("user"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=user,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) +} - assert.True(t, buf.String() != "") +func prepareDiscoveredCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("discovered"). + SetProvider("test"). + SetSource(fmt.Sprintf("type=discovered,module=%s,job=%s", module, job)). + SetModule(module). 
+ SetName(job) } -func prepareMockRegistry() module.Registry { - reg := module.Registry{} - reg.Register("success", module.Creator{ - Create: func() module.Module { - return &module.MockModule{ - InitFunc: func() bool { return true }, - CheckFunc: func() bool { return true }, - ChartsFunc: func() *module.Charts { - return &module.Charts{ - &module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}, - } - }, - CollectFunc: func() map[string]int64 { - return map[string]int64{"id1": 1} - }, - } - }, - }) - reg.Register("fail", module.Creator{ - Create: func() module.Module { - return &module.MockModule{ - InitFunc: func() bool { return false }, - } - }, - }) - return reg +func prepareDyncfgCfg(module, job string) confgroup.Config { + return confgroup.Config{}. + SetSourceType("dyncfg"). + SetProvider("dyncfg"). + SetSource(fmt.Sprintf("type=dyncfg,module=%s,job=%s", module, job)). + SetModule(module). + SetName(job) } diff --git a/agent/jobmgr/noop.go b/agent/jobmgr/noop.go index 15883105d..4c2801c24 100644 --- a/agent/jobmgr/noop.go +++ b/agent/jobmgr/noop.go @@ -3,18 +3,19 @@ package jobmgr import ( + "github.com/netdata/go.d.plugin/agent/functions" + "github.com/netdata/go.d.plugin/agent/confgroup" "github.com/netdata/go.d.plugin/agent/vnodes" ) type noop struct{} -func (n noop) Lock(string) (bool, error) { return true, nil } -func (n noop) Unlock(string) error { return nil } -func (n noop) Save(confgroup.Config, string) {} -func (n noop) Remove(confgroup.Config) {} -func (n noop) Contains(confgroup.Config, ...string) bool { return false } -func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false } -func (n noop) Register(confgroup.Config) { return } -func (n noop) Unregister(confgroup.Config) { return } -func (n noop) UpdateStatus(confgroup.Config, string, string) { return } +func (n noop) Lock(string) (bool, error) { return true, nil } +func (n noop) Unlock(string) error { return nil } +func (n noop) Save(confgroup.Config, string) {} +func (n noop) Remove(confgroup.Config) {} +func (n noop) Contains(confgroup.Config, ...string) bool { return false } +func (n noop) Lookup(string) (*vnodes.VirtualNode, bool) { return nil, false } +func (n noop) Register(name string, reg func(functions.Function)) {} +func (n noop) Unregister(name string) {} diff --git a/agent/jobmgr/run.go b/agent/jobmgr/run.go deleted file mode 100644 index f1a14cadc..000000000 --- a/agent/jobmgr/run.go +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package jobmgr - -import ( - "context" - "slices" - "time" - - "github.com/netdata/go.d.plugin/agent/ticker" -) - -func (m *Manager) runRunningJobsHandling(ctx context.Context) { - tk := ticker.New(time.Second) - defer tk.Stop() - - for { - select { - case <-ctx.Done(): - return - case clock := <-tk.C: - //m.Debugf("tick %d", clock) - m.notifyRunningJobs(clock) - } - } -} - -func (m *Manager) notifyRunningJobs(clock int) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - for _, v := range m.queue { - v.Tick(clock) - } -} - -func (m *Manager) startJob(job Job) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - go job.Start() - - m.queue = append(m.queue, job) -} - -func (m *Manager) stopJob(name string) { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - idx := slices.IndexFunc(m.queue, func(job Job) bool { - return job.FullName() == name - }) - - if idx != -1 { - j := m.queue[idx] - j.Stop() - - copy(m.queue[idx:], m.queue[idx+1:]) - m.queue[len(m.queue)-1] = nil - m.queue = 
m.queue[:len(m.queue)-1] - } -} - -func (m *Manager) stopRunningJobs() { - m.queueMux.Lock() - defer m.queueMux.Unlock() - - for i, v := range m.queue { - v.Stop() - m.queue[i] = nil - } - m.queue = m.queue[:0] -} diff --git a/agent/jobmgr/sim_test.go b/agent/jobmgr/sim_test.go new file mode 100644 index 000000000..870f19d7c --- /dev/null +++ b/agent/jobmgr/sim_test.go @@ -0,0 +1,131 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package jobmgr + +import ( + "bytes" + "context" + "errors" + "fmt" + "slices" + "strings" + "testing" + "time" + + "github.com/netdata/go.d.plugin/agent/confgroup" + "github.com/netdata/go.d.plugin/agent/module" + "github.com/netdata/go.d.plugin/agent/netdataapi" + "github.com/netdata/go.d.plugin/agent/safewriter" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +type runSim struct { + do func(mgr *Manager) + + wantDiscovered []confgroup.Config + wantSeen []seenConfig + wantExposed []seenConfig + wantRunning []string + wantDyncfg string +} + +func (s *runSim) run(t *testing.T) { + t.Helper() + + require.NotNil(t, s.do, "s.do is nil") + + var buf bytes.Buffer + mgr := New() + mgr.api = netdataapi.New(safewriter.New(&buf)) + mgr.Modules = prepareMockRegistry() + + done := make(chan struct{}) + grpCh := make(chan []*confgroup.Group) + ctx, cancel := context.WithCancel(context.Background()) + + go func() { defer close(done); close(grpCh); mgr.Run(ctx, grpCh) }() + + timeout := time.Second * 5 + + select { + case <-mgr.started: + case <-time.After(timeout): + t.Errorf("failed to start work in %s", timeout) + } + + s.do(mgr) + cancel() + + select { + case <-done: + case <-time.After(timeout): + t.Errorf("failed to finish work in %s", timeout) + } + + parts := strings.Split(buf.String(), "\n") + parts = slices.DeleteFunc(parts, func(s string) bool { + return strings.HasPrefix(s, "CONFIG") && strings.Contains(s, " template ") + }) + + wantDyncfg, gotDyncfg := strings.TrimSpace(s.wantDyncfg), strings.TrimSpace(strings.Join(parts, "\n")) + + fmt.Println(gotDyncfg) + + assert.Equal(t, wantDyncfg, gotDyncfg, "dyncfg commands") + + var n int + for _, cfgs := range mgr.discoveredConfigs.items { + n += len(cfgs) + } + + require.Len(t, s.wantDiscovered, n, "discoveredConfigs: different len") + + for _, cfg := range s.wantDiscovered { + cfgs, ok := mgr.discoveredConfigs.items[cfg.Source()] + require.Truef(t, ok, "discoveredConfigs: source %s is not found", cfg.Source()) + _, ok = cfgs[cfg.Hash()] + require.Truef(t, ok, "discoveredConfigs: source %s config %d is not found", cfg.Source(), cfg.Hash()) + } + + require.Len(t, s.wantSeen, len(mgr.seenConfigs.items), "seenConfigs: different len") + + for _, scfg := range s.wantSeen { + v, ok := mgr.seenConfigs.lookup(scfg.cfg) + require.Truef(t, ok, "seenConfigs: config '%s' is not found", scfg.cfg.UID()) + require.Truef(t, scfg.status == v.status, "seenConfigs: wrong status, want %s got %s", scfg.status, v.status) + } + + require.Len(t, s.wantExposed, len(mgr.exposedConfigs.items), "exposedConfigs: different len") + + for _, scfg := range s.wantExposed { + v, ok := mgr.exposedConfigs.lookup(scfg.cfg) + require.Truef(t, ok && scfg.cfg.UID() == v.cfg.UID(), "exposedConfigs: config '%s' is not found", scfg.cfg.UID()) + require.Truef(t, scfg.status == v.status, "exposedConfigs: wrong status, want %s got %s", scfg.status, v.status) + } +} + +func prepareMockRegistry() module.Registry { + reg := module.Registry{} + + reg.Register("success", module.Creator{ + JobConfigSchema: module.MockConfigSchema, + 
Create: func() module.Module { + return &module.MockModule{ + ChartsFunc: func() *module.Charts { + return &module.Charts{&module.Chart{ID: "id", Title: "title", Units: "units", Dims: module.Dims{{ID: "id1"}}}} + }, + CollectFunc: func() map[string]int64 { return map[string]int64{"id1": 1} }, + } + }, + }) + reg.Register("fail", module.Creator{ + Create: func() module.Module { + return &module.MockModule{ + InitFunc: func() error { return errors.New("mock failed init") }, + } + }, + }) + + return reg +} diff --git a/agent/module/job.go b/agent/module/job.go index 6200ff9f5..b9b41f03f 100644 --- a/agent/module/job.go +++ b/agent/module/job.go @@ -4,6 +4,7 @@ package module import ( "bytes" + "errors" "fmt" "io" "log/slog" @@ -85,6 +86,10 @@ const ( func NewJob(cfg JobConfig) *Job { var buf bytes.Buffer + if cfg.UpdateEvery == 0 { + cfg.UpdateEvery = 1 + } + j := &Job{ AutoDetectEvery: cfg.AutoDetectEvery, AutoDetectTries: infTries, @@ -167,40 +172,44 @@ type Job struct { const NetdataChartIDMaxLength = 1000 // FullName returns job full name. -func (j Job) FullName() string { +func (j *Job) FullName() string { return j.fullName } // ModuleName returns job module name. -func (j Job) ModuleName() string { +func (j *Job) ModuleName() string { return j.moduleName } // Name returns job name. -func (j Job) Name() string { +func (j *Job) Name() string { return j.name } // Panicked returns 'panicked' flag value. -func (j Job) Panicked() bool { +func (j *Job) Panicked() bool { return j.panicked } // AutoDetectionEvery returns value of AutoDetectEvery. -func (j Job) AutoDetectionEvery() int { +func (j *Job) AutoDetectionEvery() int { return j.AutoDetectEvery } // RetryAutoDetection returns whether it is needed to retry autodetection. -func (j Job) RetryAutoDetection() bool { +func (j *Job) RetryAutoDetection() bool { return j.AutoDetectEvery > 0 && (j.AutoDetectTries == infTries || j.AutoDetectTries > 0) } +func (j *Job) Configuration() any { + return j.module.Configuration() +} + // AutoDetection invokes init, check and postCheck. It handles panic. -func (j *Job) AutoDetection() (ok bool) { +func (j *Job) AutoDetection() (err error) { defer func() { if r := recover(); r != nil { - ok = false + err = fmt.Errorf("panic %v", err) j.panicked = true j.disableAutoDetection() @@ -209,7 +218,7 @@ func (j *Job) AutoDetection() (ok bool) { j.Errorf("STACK: %s", debug.Stack()) } } - if !ok { + if err != nil { j.module.Cleanup() } }() @@ -218,29 +227,29 @@ func (j *Job) AutoDetection() (ok bool) { j.Mute() } - if ok = j.init(); !ok { + if err = j.init(); err != nil { j.Error("init failed") j.Unmute() j.disableAutoDetection() - return + return err } - if ok = j.check(); !ok { + if err = j.check(); err != nil { j.Error("check failed") j.Unmute() - return + return err } j.Unmute() - j.Info("check success") - if ok = j.postCheck(); !ok { + + if err = j.postCheck(); err != nil { j.Error("postCheck failed") j.disableAutoDetection() - return + return err } - return true + return nil } // Tick Tick. 
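
Note on the hunk above: AutoDetection now returns an error instead of a bool, so callers decide what to do with a failed detection. A minimal sketch of how such a result could be mapped onto the dyncfg status strings exercised by the jobmgr tests earlier in this patch ("running", "failed") is shown below; the package and helper name are hypothetical and not part of the patch.

package sketch

import "github.com/netdata/go.d.plugin/agent/module"

// detectionStatus is an illustrative helper (not in the patch): it runs the
// error-based AutoDetection and maps the outcome to a dyncfg-style status string.
func detectionStatus(job *module.Job) string {
	if err := job.AutoDetection(); err != nil {
		// Init, Check, or postCheck returned an error (or panicked); the job is not started.
		// Whether it gets retried later depends on job.RetryAutoDetection().
		return "failed"
	}
	return "running"
}
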
@@ -316,34 +325,40 @@ func (j *Job) Cleanup() { } } -func (j *Job) init() bool { +func (j *Job) init() error { if j.initialized { - return true + return nil + } + + if err := j.module.Init(); err != nil { + return err } - j.initialized = j.module.Init() + j.initialized = true - return j.initialized + return nil } -func (j *Job) check() bool { - ok := j.module.Check() - if !ok && j.AutoDetectTries != infTries { - j.AutoDetectTries-- +func (j *Job) check() error { + if err := j.module.Check(); err != nil { + if j.AutoDetectTries != infTries { + j.AutoDetectTries-- + } + return err } - return ok + return nil } -func (j *Job) postCheck() bool { +func (j *Job) postCheck() error { if j.charts = j.module.Charts(); j.charts == nil { j.Error("nil charts") - return false + return errors.New("nil charts") } if err := checkCharts(*j.charts...); err != nil { j.Errorf("charts check: %v", err) - return false + return err } - return true + return nil } func (j *Job) runOnce() { @@ -562,7 +577,7 @@ func (j *Job) updateChart(chart *Chart, collected map[string]int64, sinceLastRun return chart.updated } -func (j Job) penalty() int { +func (j *Job) penalty() int { v := j.retries / penaltyStep * penaltyStep * j.updateEvery / 2 if v > maxPenalty { return maxPenalty diff --git a/agent/module/job_test.go b/agent/module/job_test.go index f19fdcebd..c87f840d5 100644 --- a/agent/module/job_test.go +++ b/agent/module/job_test.go @@ -3,6 +3,7 @@ package module import ( + "errors" "fmt" "io" "testing" @@ -72,10 +73,10 @@ func TestJob_AutoDetectionEvery(t *testing.T) { func TestJob_RetryAutoDetection(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { return false }, + CheckFunc: func() error { return errors.New("check error") }, ChartsFunc: func() *Charts { return &Charts{} }, @@ -86,14 +87,14 @@ func TestJob_RetryAutoDetection(t *testing.T) { assert.True(t, job.RetryAutoDetection()) assert.Equal(t, infTries, job.AutoDetectTries) for i := 0; i < 1000; i++ { - job.check() + _ = job.check() } assert.True(t, job.RetryAutoDetection()) assert.Equal(t, infTries, job.AutoDetectTries) job.AutoDetectTries = 10 for i := 0; i < 10; i++ { - job.check() + _ = job.check() } assert.False(t, job.RetryAutoDetection()) assert.Equal(t, 0, job.AutoDetectTries) @@ -103,13 +104,13 @@ func TestJob_AutoDetection(t *testing.T) { job := newTestJob() var v int m := &MockModule{ - InitFunc: func() bool { + InitFunc: func() error { v++ - return true + return nil }, - CheckFunc: func() bool { + CheckFunc: func() error { v++ - return true + return nil }, ChartsFunc: func() *Charts { v++ @@ -118,47 +119,47 @@ func TestJob_AutoDetection(t *testing.T) { } job.module = m - assert.True(t, job.AutoDetection()) + assert.NoError(t, job.AutoDetection()) assert.Equal(t, 3, v) } func TestJob_AutoDetection_FailInit(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return false + InitFunc: func() error { + return errors.New("init error") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_FailCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return false + CheckFunc: func() error { + return errors.New("check error") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, 
job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_FailPostCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return true + CheckFunc: func() error { + return nil }, ChartsFunc: func() *Charts { return nil @@ -166,47 +167,47 @@ func TestJob_AutoDetection_FailPostCheck(t *testing.T) { } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicInit(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { + InitFunc: func() error { panic("panic in Init") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { + CheckFunc: func() error { panic("panic in Check") }, } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } func TestJob_AutoDetection_PanicPostCheck(t *testing.T) { job := newTestJob() m := &MockModule{ - InitFunc: func() bool { - return true + InitFunc: func() error { + return nil }, - CheckFunc: func() bool { - return true + CheckFunc: func() error { + return nil }, ChartsFunc: func() *Charts { panic("panic in PostCheck") @@ -214,7 +215,7 @@ func TestJob_AutoDetection_PanicPostCheck(t *testing.T) { } job.module = m - assert.False(t, job.AutoDetection()) + assert.Error(t, job.AutoDetection()) assert.True(t, m.CleanupDone) } diff --git a/agent/module/mock.go b/agent/module/mock.go index c4353eb52..65b93debf 100644 --- a/agent/module/mock.go +++ b/agent/module/mock.go @@ -2,12 +2,40 @@ package module +const MockConfigSchema = ` +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "option_str": { + "type": "string", + "description": "Option string value" + }, + "option_int": { + "type": "integer", + "description": "Option integer value" + } + }, + "required": [ + "option_str", + "option_int" + ] +} +` + +type MockConfiguration struct { + OptionStr string `yaml:"option_str" json:"option_str"` + OptionInt int `yaml:"option_int" json:"option_int"` +} + // MockModule MockModule. type MockModule struct { Base - InitFunc func() bool - CheckFunc func() bool + Config MockConfiguration `yaml:",inline" json:",inline"` + + InitFunc func() error + CheckFunc func() error ChartsFunc func() *Charts CollectFunc func() map[string]int64 CleanupFunc func() @@ -15,23 +43,23 @@ type MockModule struct { } // Init invokes InitFunc. -func (m MockModule) Init() bool { +func (m *MockModule) Init() error { if m.InitFunc == nil { - return true + return nil } return m.InitFunc() } // Check invokes CheckFunc. -func (m MockModule) Check() bool { +func (m *MockModule) Check() error { if m.CheckFunc == nil { - return true + return nil } return m.CheckFunc() } // Charts invokes ChartsFunc. -func (m MockModule) Charts() *Charts { +func (m *MockModule) Charts() *Charts { if m.ChartsFunc == nil { return nil } @@ -39,7 +67,7 @@ func (m MockModule) Charts() *Charts { } // Collect invokes CollectDunc. 
-func (m MockModule) Collect() map[string]int64 { +func (m *MockModule) Collect() map[string]int64 { if m.CollectFunc == nil { return nil } @@ -53,3 +81,7 @@ func (m *MockModule) Cleanup() { } m.CleanupDone = true } + +func (m *MockModule) Configuration() any { + return m.Config +} diff --git a/agent/module/mock_test.go b/agent/module/mock_test.go index 9c194e893..d7521911f 100644 --- a/agent/module/mock_test.go +++ b/agent/module/mock_test.go @@ -12,17 +12,17 @@ import ( func TestMockModule_Init(t *testing.T) { m := &MockModule{} - assert.True(t, m.Init()) - m.InitFunc = func() bool { return false } - assert.False(t, m.Init()) + assert.NoError(t, m.Init()) + m.InitFunc = func() error { return nil } + assert.NoError(t, m.Init()) } func TestMockModule_Check(t *testing.T) { m := &MockModule{} - assert.True(t, m.Check()) - m.CheckFunc = func() bool { return false } - assert.False(t, m.Check()) + assert.NoError(t, m.Check()) + m.CheckFunc = func() error { return nil } + assert.NoError(t, m.Check()) } func TestMockModule_Charts(t *testing.T) { diff --git a/agent/module/module.go b/agent/module/module.go index 3421a02ee..5c88c6e04 100644 --- a/agent/module/module.go +++ b/agent/module/module.go @@ -9,15 +9,14 @@ import ( // Module is an interface that represents a module. type Module interface { // Init does initialization. - // If it returns false, the job will be disabled. - Init() bool + // If it returns error, the job will be disabled. + Init() error // Check is called after Init. - // If it returns false, the job will be disabled. - Check() bool + // If it returns error, the job will be disabled. + Check() error // Charts returns the chart definition. - // Make sure not to share returned instance. Charts() *Charts // Collect collects metrics. @@ -27,6 +26,8 @@ type Module interface { Cleanup() GetBase() *Base + + Configuration() any } // Base is a helper struct. All modules should embed this struct. 
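
For context on the interface change rolled out across the modules below: Init and Check now report failure by returning an error, and every module must expose its configuration via Configuration() any. A minimal collector written against this contract might look like the sketch below; the type names and the url field are made up for illustration, only the method set comes from the Module interface above (it mirrors the shape of MockModule).

package sketch

import (
	"errors"

	"github.com/netdata/go.d.plugin/agent/module"
)

// sketchConfig is an illustrative config struct, not part of the patch.
type sketchConfig struct {
	URL string `yaml:"url" json:"url"`
}

// sketchCollector is illustrative only; embedding module.Base satisfies GetBase().
type sketchCollector struct {
	module.Base

	Config sketchConfig `yaml:",inline" json:",inline"`
}

func (c *sketchCollector) Configuration() any { return c.Config }

func (c *sketchCollector) Init() error {
	if c.Config.URL == "" {
		return errors.New("url not set") // was `return false` under the old bool contract
	}
	return nil
}

func (c *sketchCollector) Check() error { return nil }

func (c *sketchCollector) Charts() *module.Charts { return &module.Charts{} }

func (c *sketchCollector) Collect() map[string]int64 { return map[string]int64{"up": 1} }

func (c *sketchCollector) Cleanup() {}
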
diff --git a/agent/module/registry.go b/agent/module/registry.go index 4d0d2c493..f2fa661c1 100644 --- a/agent/module/registry.go +++ b/agent/module/registry.go @@ -44,3 +44,8 @@ func (r Registry) Register(name string, creator Creator) { } r[name] = creator } + +func (r Registry) Lookup(name string) (Creator, bool) { + v, ok := r[name] + return v, ok +} diff --git a/agent/netdataapi/api.go b/agent/netdataapi/api.go index 43c34d22d..b9ade50a8 100644 --- a/agent/netdataapi/api.go +++ b/agent/netdataapi/api.go @@ -165,52 +165,50 @@ func (a *API) HOSTDEFINEEND() error { } func (a *API) HOST(guid string) error { - _, err := a.Write([]byte("HOST " + "'" + guid + "'" + "\n\n")) + _, err := a.Write([]byte("HOST " + "'" + + guid + "'\n\n")) return err } -func (a *API) DynCfgEnable(pluginName string) error { - _, err := a.Write([]byte("DYNCFG_ENABLE '" + pluginName + "'\n\n")) - return err -} +func (a *API) FUNCRESULT(uid, contentType, payload, code string) { + var buf bytes.Buffer -func (a *API) DynCfgReset() error { - _, err := a.Write([]byte("DYNCFG_RESET\n")) - return err -} + buf.WriteString("FUNCTION_RESULT_BEGIN " + + uid + " " + + code + " " + + contentType + " " + + code + "\n", + ) -func (a *API) DyncCfgRegisterModule(moduleName string) error { - _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_MODULE '%s' job_array\n\n", moduleName) - return err -} + if payload != "" { + buf.WriteString(payload + "\n") + } -func (a *API) DynCfgRegisterJob(moduleName, jobName, jobType string) error { - _, err := fmt.Fprintf(a, "DYNCFG_REGISTER_JOB '%s' '%s' '%s' 0\n\n", moduleName, jobName, jobType) - return err -} + buf.WriteString("FUNCTION_RESULT_END\n\n") -func (a *API) DynCfgReportJobStatus(moduleName, jobName, status, reason string) error { - _, err := fmt.Fprintf(a, "REPORT_JOB_STATUS '%s' '%s' '%s' 0 '%s'\n\n", moduleName, jobName, status, reason) - return err + _, _ = buf.WriteTo(a) } -func (a *API) FunctionResultSuccess(uid, contentType, payload string) error { - return a.functionResult(uid, contentType, payload, "1") -} +func (a *API) CONFIGCREATE(id, status, configType, path, sourceType, source, supportedCommands string) { + // https://learn.netdata.cloud/docs/contributing/external-plugins/#config -func (a *API) FunctionResultReject(uid, contentType, payload string) error { - return a.functionResult(uid, contentType, payload, "0") + _, _ = a.Write([]byte("CONFIG " + + id + " " + + "create" + " " + + status + " " + + configType + " " + + path + " " + + sourceType + " '" + + source + "' '" + + supportedCommands + "' 0x0000 0x0000\n\n", + )) + // supportedCommands + "' 0x7ff 0x7ff\n", } -func (a *API) functionResult(uid, contentType, payload, code string) error { - var buf bytes.Buffer - - buf.WriteString("FUNCTION_RESULT_BEGIN " + uid + " " + code + " " + contentType + " 0\n") - if payload != "" { - buf.WriteString(payload + "\n") - } - buf.WriteString("FUNCTION_RESULT_END\n\n") +func (a *API) CONFIGDELETE(id string) { + _, _ = a.Write([]byte("CONFIG " + id + " delete\n\n")) +} - _, err := buf.WriteTo(a) - return err +func (a *API) CONFIGSTATUS(id, status string) { + _, _ = a.Write([]byte("CONFIG " + id + " status " + status + "\n\n")) } diff --git a/agent/netdataapi/api_test.go b/agent/netdataapi/api_test.go index 30f019460..e5087839b 100644 --- a/agent/netdataapi/api_test.go +++ b/agent/netdataapi/api_test.go @@ -260,101 +260,6 @@ HOST_DEFINE_END ) } -func TestAPI_DynCfgEnable(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgEnable("plugin") - - assert.Equal( - t, - 
"DYNCFG_ENABLE 'plugin'\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgReset(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgReset() - - assert.Equal( - t, - "DYNCFG_RESET\n", - buf.String(), - ) -} - -func TestAPI_DyncCfgRegisterModule(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DyncCfgRegisterModule("module") - - assert.Equal( - t, - "DYNCFG_REGISTER_MODULE 'module' job_array\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgRegisterJob(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} +func TestAPI_FUNCRESULT(t *testing.T) { - _ = a.DynCfgRegisterJob("module", "job", "type") - - assert.Equal( - t, - "DYNCFG_REGISTER_JOB 'module' 'job' 'type' 0\n\n", - buf.String(), - ) -} - -func TestAPI_DynCfgReportJobStatus(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.DynCfgReportJobStatus("module", "job", "status", "reason") - - assert.Equal( - t, - "REPORT_JOB_STATUS 'module' 'job' 'status' 0 'reason'\n\n", - buf.String(), - ) -} - -func TestAPI_FunctionResultSuccess(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.FunctionResultSuccess("uid", "contentType", "payload") - - assert.Equal( - t, - `FUNCTION_RESULT_BEGIN uid 1 contentType 0 -payload -FUNCTION_RESULT_END - -`, - buf.String(), - ) -} - -func TestAPI_FunctionResultReject(t *testing.T) { - buf := &bytes.Buffer{} - a := API{Writer: buf} - - _ = a.FunctionResultReject("uid", "contentType", "payload") - - assert.Equal( - t, - `FUNCTION_RESULT_BEGIN uid 0 contentType 0 -payload -FUNCTION_RESULT_END - -`, - buf.String(), - ) } diff --git a/config/go.d/energid.conf b/config/go.d/energid.conf deleted file mode 100644 index e6495062e..000000000 --- a/config/go.d/energid.conf +++ /dev/null @@ -1,17 +0,0 @@ -## All available configuration options, their descriptions and default values: -## https://github.com/netdata/go.d.plugin/tree/master/modules/energid - -#update_every: 1 -#autodetection_retry: 0 -#priority: 70000 - -#jobs: -# - name: energi -# url: http://127.0.0.1:9796 -# username: energy -# password: energy -# -# - name: bitcoin -# url: http://203.0.113.0:8332 -# username: bitcoin -# password: bitcoin diff --git a/config/go.d/sd/hostsocket.yaml b/config/go.d/sd/hostsocket.yaml new file mode 100644 index 000000000..f23603d90 --- /dev/null +++ b/config/go.d/sd/hostsocket.yaml @@ -0,0 +1,97 @@ +name: hostsocket + +discover: + hostsocket: + net: + tags: "netsocket" + unix: + tags: "unixsocket" + +classify: + - name: "Applications" + selector: "netsocket" + tags: "app" + match: + - tags: "activemq" + expr: '{{ and (eq .Port "8161") (eg. 
Comm "activemq") }}'
+      - tags: "apache"
+        expr: '{{ and (eq .Port "80" "8080") (eq .Comm "apache" "httpd") }}'
+      - tags: "bind"
+        expr: '{{ and (eq .Port "8653") (eq .Comm "bind" "named") }}'
+      - tags: "cassandra"
+        expr: '{{ and (eq .Port "7072") (glob .Cmdline "*cassandra*") }}'
+      - tags: "chrony"
+        expr: '{{ and (eq .Port "323") (eq .Comm "chronyd") }}'
+      - tags: "cockroachdb"
+        expr: '{{ and (eq .Port "8080") (eq .Comm "cockroach") }}'
+      - tags: "consul"
+        expr: '{{ and (eq .Port "8500") (eq .Comm "consul") }}'
+      - tags: "coredns"
+        expr: '{{ and (eq .Port "9153") (eq .Comm "coredns") }}'
+      - tags: "couchbase"
+        expr: '{{ and (eq .Port "8091") (glob .Cmdline "*couchbase*") }}'
+      - tags: "couchdb"
+        expr: '{{ and (eq .Port "5984") (glob .Cmdline "*couchdb*") }}'
+      - tags: "dnsdist"
+        expr: '{{ and (eq .Port "8083") (eq .Comm "dnsdist") }}'
+      - tags: "dnsmasq"
+        expr: '{{ and (eq .Port "53") (eq .Comm "dnsmasq") }}'
+      - tags: "docker_engine"
+        expr: '{{ and (eq .Port "9323") (eq .Comm "dockerd") }}'
+      - tags: "elasticsearch"
+        expr: '{{ and (eq .Port "9200") (glob .Cmdline "*elasticsearch*") }}'
+      - tags: "opensearch"
+        expr: '{{ and (eq .Port "9200") (glob .Cmdline "*opensearch*") }}'
+      - tags: "envoy"
+        expr: '{{ and (eq .Port "9901") (eq .Comm "envoy") }}'
+      - tags: "fluentd"
+        expr: '{{ and (eq .Port "24220") (glob .Cmdline "*fluentd*") }}'
+      - tags: "freeradius"
+        expr: '{{ and (eq .Port "18121") (eq .Comm "freeradius") }}'
+      - tags: "geth"
+        expr: '{{ and (eq .Port "6060") (eq .Comm "geth") }}'
+      - tags: "haproxy"
+        expr: '{{ and (eq .Port "8404") (eq .Comm "haproxy") }}'
+      - tags: "hdfs_namenode"
+        expr: '{{ and (eq .Port "9870") (eq .Comm "hadoop") }}'
+      - tags: "hdfs_datanode"
+        expr: '{{ and (eq .Port "9864") (eq .Comm "hadoop") }}'
+      - tags: "kubelet"
+        expr: '{{ and (eq .Port "10250" "10255") (eq .Comm "kubelet") }}'
+      - tags: "kubeproxy"
+        expr: '{{ and (eq .Port "10249") (eq .Comm "kube-proxy") }}'
+      - tags: "lighttpd"
+        expr: '{{ and (eq .Port "80" "8080") (eq .Comm "lighttpd") }}'
+      - tags: "logstash"
+        expr: '{{ and (eq .Port "9600") (glob .Cmdline "*logstash*") }}'
+      - tags: "mongodb"
+        expr: '{{ and (eq .Port "27017") (eq .Comm "mongod") }}'
+      - tags: "mysql"
+        expr: '{{ and (eq .Port "3306") (eq .Comm "mysqld" "mariadb") }}'
+      - tags: "nginx"
+        expr: '{{ and (eq .Port "80" "8080") (eq .Comm "nginx") }}'
+      - tags: "ntpd"
+        expr: '{{ and (eq .Port "123") (eq .Comm "ntpd") }}'
+      - tags: "openvpn"
+        expr: '{{ and (eq .Port "7505") (eq .Comm "openvpn") }}'
+      - tags: "pgbouncer"
+        expr: '{{ and (eq .Port "6432") (eq .Comm "pgbouncer") }}'
+      - tags: "pihole"
+        expr: '{{ and (eq .Port "53") (eq .Comm "pihole-FTL") }}'
+      - tags: "pika"
+        expr: '{{ and (eq .Port "9221") (eq .Comm "pika") }}'
+      - tags: "postgres"
+        expr: '{{ and (eq .Port "5432") (eq .Comm "postgres") }}'
+      - tags: "powerdns"
+        expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_server") }}'
+      - tags: "powerdns_recursor"
+        expr: '{{ and (eq .Port "8081") (eq .Comm "pdns_recursor") }}'
+
+compose:
+  - name: "Applications"
+    config:
+      - selector: "activemq"
+        template: |
+          module: activemq
+          name: local
+          url: http://{{.Address}}:{{.Port}}
diff --git a/config/go.d/solr.conf b/config/go.d/solr.conf
deleted file mode 100644
index c0cc7d095..000000000
--- a/config/go.d/solr.conf
+++ /dev/null
@@ -1,13 +0,0 @@
-## All available configuration options, their descriptions and default values:
-## https://github.com/netdata/go.d.plugin/tree/master/modules/solr
-
-#update_every: 1
-#autodetection_retry: 0
-#priority: 70000
-
-jobs: - - name: local - url: http://localhost:8983 - - - name: local - url: http://127.0.0.1:8983 diff --git a/config/go.d/springboot2.conf b/config/go.d/springboot2.conf deleted file mode 100644 index 6328bcc57..000000000 --- a/config/go.d/springboot2.conf +++ /dev/null @@ -1,13 +0,0 @@ -## All available configuration options, their descriptions and default values: -## https://github.com/netdata/go.d.plugin/tree/master/modules/springboot2 - -#update_every: 1 -#autodetection_retry: 0 -#priority: 70000 - -jobs: - - name: local - url: http://localhost:8080/actuator/prometheus - - - name: local - url: http://127.0.0.1:8080/actuator/prometheus diff --git a/examples/simple/main.go b/examples/simple/main.go index 9982b91fc..f497ccba2 100644 --- a/examples/simple/main.go +++ b/examples/simple/main.go @@ -3,6 +3,7 @@ package main import ( + "errors" "fmt" "log/slog" "math/rand" @@ -24,9 +25,9 @@ type example struct{ module.Base } func (example) Cleanup() {} -func (example) Init() bool { return true } +func (example) Init() error { return nil } -func (example) Check() bool { return true } +func (example) Check() error { return nil } func (example) Charts() *module.Charts { return &module.Charts{ @@ -40,6 +41,7 @@ func (example) Charts() *module.Charts { }, } } +func (example) Configuration() any { return nil } func (e *example) Collect() map[string]int64 { return map[string]int64{ @@ -116,10 +118,10 @@ func main() { func parseCLI() *cli.Option { opt, err := cli.Parse(os.Args) - if err != nil { - if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp { - os.Exit(0) - } + var flagsErr *flags.Error + if errors.As(err, &flagsErr) && errors.Is(flagsErr.Type, flags.ErrHelp) { + os.Exit(0) + } else { os.Exit(1) } return opt diff --git a/modules/activemq/activemq.go b/modules/activemq/activemq.go index 109c874de..8b1a50d8b 100644 --- a/modules/activemq/activemq.go +++ b/modules/activemq/activemq.go @@ -4,8 +4,7 @@ package activemq import ( _ "embed" - "fmt" - "strings" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -24,59 +23,41 @@ func init() { }) } -const ( - keyQueues = "queues" - keyTopics = "topics" - keyAdvisory = "Advisory" -) - -var nameReplacer = strings.NewReplacer(".", "_", " ", "") - -const ( - defaultMaxQueues = 50 - defaultMaxTopics = 50 - defaultURL = "http://127.0.0.1:8161" - defaultHTTPTimeout = time.Second -) - -// New creates Example with default values. func New() *ActiveMQ { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &ActiveMQ{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8161", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, + Webadmin: "admin", + MaxQueues: 50, + MaxTopics: 50, }, - - MaxQueues: defaultMaxQueues, - MaxTopics: defaultMaxTopics, - } - - return &ActiveMQ{ - Config: config, charts: &Charts{}, activeQueues: make(map[string]bool), activeTopics: make(map[string]bool), } } -// Config is the ActiveMQ module configuration. 
type Config struct { - web.HTTP `yaml:",inline"` - Webadmin string `yaml:"webadmin"` - MaxQueues int `yaml:"max_queues"` - MaxTopics int `yaml:"max_topics"` - QueuesFilter string `yaml:"queues_filter"` - TopicsFilter string `yaml:"topics_filter"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` + Webadmin string `yaml:"webadmin" json:"webadmin"` + MaxQueues int `yaml:"max_queues" json:"max_queues"` + MaxTopics int `yaml:"max_topics" json:"max_topics"` + QueuesFilter string `yaml:"queues_filter" json:"queues_filter"` + TopicsFilter string `yaml:"topics_filter" json:"topics_filter"` } -// ActiveMQ ActiveMQ module. type ActiveMQ struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` apiClient *apiClient activeQueues map[string]bool @@ -86,228 +67,71 @@ type ActiveMQ struct { charts *Charts } -// Cleanup makes cleanup. -func (ActiveMQ) Cleanup() {} - -// Init makes initialization. -func (a *ActiveMQ) Init() bool { - if a.URL == "" { - a.Error("URL not set") - return false - } +func (a *ActiveMQ) Configuration() any { + return a.Config +} - if a.Webadmin == "" { - a.Error("webadmin root path is not set") - return false +func (a *ActiveMQ) Init() error { + if err := a.validateConfig(); err != nil { + a.Errorf("config validation: %v", err) + return err } - if a.QueuesFilter != "" { - f, err := matcher.NewSimplePatternsMatcher(a.QueuesFilter) - if err != nil { - a.Errorf("error on creating queues filter : %v", err) - return false - } - a.queuesFilter = matcher.WithCache(f) + qf, err := a.initQueuesFiler() + if err != nil { + a.Error(err) + return err } + a.queuesFilter = qf - if a.TopicsFilter != "" { - f, err := matcher.NewSimplePatternsMatcher(a.TopicsFilter) - if err != nil { - a.Errorf("error on creating topics filter : %v", err) - return false - } - a.topicsFilter = matcher.WithCache(f) + tf, err := a.initTopicsFilter() + if err != nil { + a.Error(err) + return err } + a.topicsFilter = tf client, err := web.NewHTTPClient(a.Client) if err != nil { a.Error(err) - return false + return err } a.apiClient = newAPIClient(client, a.Request, a.Webadmin) - return true + return nil } -// Check makes check. -func (a *ActiveMQ) Check() bool { - return len(a.Collect()) > 0 -} - -// Charts creates Charts. -func (a ActiveMQ) Charts() *Charts { - return a.charts -} - -// Collect collects metrics. 
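Init and Check now return error instead of bool, and every module gains a Configuration() any accessor so the agent can serialize a job's effective settings for dynamic configuration. A minimal collector written against that contract is sketched below; the shape is inferred from the changes in this patch, and the module name, option and sample metric are illustrative only:

package example

import (
	"errors"

	"github.com/netdata/go.d.plugin/agent/module"
)

type Config struct {
	UpdateEvery int    `yaml:"update_every" json:"update_every"`
	Address     string `yaml:"address" json:"address"` // hypothetical option
}

type Collector struct {
	module.Base
	Config `yaml:",inline" json:",inline"`
}

// Configuration exposes the effective config so dyncfg can marshal it.
func (c *Collector) Configuration() any { return c.Config }

func (c *Collector) Init() error {
	if c.Address == "" {
		return errors.New("address not set")
	}
	return nil
}

func (c *Collector) Check() error {
	if len(c.Collect()) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}

func (c *Collector) Charts() *module.Charts { return &module.Charts{} }

func (c *Collector) Collect() map[string]int64 { return map[string]int64{"up": 1} }

func (c *Collector) Cleanup() {}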
-func (a *ActiveMQ) Collect() map[string]int64 { - metrics := make(map[string]int64) - - var ( - queues *queues - topics *topics - err error - ) - - if queues, err = a.apiClient.getQueues(); err != nil { - a.Error(err) - return nil - } - - if topics, err = a.apiClient.getTopics(); err != nil { +func (a *ActiveMQ) Check() error { + mx, err := a.collect() + if err != nil { a.Error(err) - return nil - } - - a.processQueues(queues, metrics) - a.processTopics(topics, metrics) - - return metrics -} - -func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) { - var ( - count = len(a.activeQueues) - updated = make(map[string]bool) - unp int - ) - - for _, q := range queues.Items { - if strings.Contains(q.Name, keyAdvisory) { - continue - } - - if !a.activeQueues[q.Name] { - if a.MaxQueues != 0 && count > a.MaxQueues { - unp++ - continue - } - - if !a.filterQueues(q.Name) { - continue - } - - a.activeQueues[q.Name] = true - a.addQueueTopicCharts(q.Name, keyQueues) - } - - rname := nameReplacer.Replace(q.Name) - - metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount - metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount - metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount - metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount - - updated[q.Name] = true + return err } + if len(mx) == 0 { + return errors.New("no metrics collected") - for name := range a.activeQueues { - if !updated[name] { - delete(a.activeQueues, name) - a.removeQueueTopicCharts(name, keyQueues) - } - } - - if unp > 0 { - a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues) } + return nil } -func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) { - var ( - count = len(a.activeTopics) - updated = make(map[string]bool) - unp int - ) - - for _, t := range topics.Items { - if strings.Contains(t.Name, keyAdvisory) { - continue - } - - if !a.activeTopics[t.Name] { - if a.MaxTopics != 0 && count > a.MaxTopics { - unp++ - continue - } - - if !a.filterTopics(t.Name) { - continue - } - - a.activeTopics[t.Name] = true - a.addQueueTopicCharts(t.Name, keyTopics) - } - - rname := nameReplacer.Replace(t.Name) - - metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount - metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount - metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount - metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount - - updated[t.Name] = true - } - - for name := range a.activeTopics { - if !updated[name] { - // TODO: delete after timeout? 
- delete(a.activeTopics, name) - a.removeQueueTopicCharts(name, keyTopics) - } - } - - if unp > 0 { - a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics) - } -} - -func (a ActiveMQ) filterQueues(line string) bool { - if a.queuesFilter == nil { - return true - } - return a.queuesFilter.MatchString(line) +func (a *ActiveMQ) Charts() *Charts { + return a.charts } -func (a ActiveMQ) filterTopics(line string) bool { - if a.topicsFilter == nil { - return true +func (a *ActiveMQ) Cleanup() { + if a.apiClient != nil && a.apiClient.httpClient != nil { + a.apiClient.httpClient.CloseIdleConnections() } - return a.topicsFilter.MatchString(line) } -func (a *ActiveMQ) addQueueTopicCharts(name, typ string) { - rname := nameReplacer.Replace(name) - - charts := charts.Copy() - - for _, chart := range *charts { - chart.ID = fmt.Sprintf(chart.ID, typ, rname) - chart.Title = fmt.Sprintf(chart.Title, name) - chart.Fam = typ +func (a *ActiveMQ) Collect() map[string]int64 { + mx, err := a.collect() - for _, dim := range chart.Dims { - dim.ID = fmt.Sprintf(dim.ID, typ, rname) - } + if err != nil { + a.Error(err) + return nil } - _ = a.charts.Add(*charts...) - -} - -func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) { - rname := nameReplacer.Replace(name) - - chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() - - chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() - - chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname)) - chart.MarkRemove() - chart.MarkNotCreated() + return mx } diff --git a/modules/activemq/activemq_test.go b/modules/activemq/activemq_test.go index e45ceecd4..5e11dfbde 100644 --- a/modules/activemq/activemq_test.go +++ b/modules/activemq/activemq_test.go @@ -9,7 +9,6 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -131,25 +130,15 @@ var ( } ) -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) - assert.Equal(t, defaultMaxQueues, job.MaxQueues) - assert.Equal(t, defaultMaxTopics, job.MaxTopics) -} - func TestActiveMQ_Init(t *testing.T) { job := New() // NG case - assert.False(t, job.Init()) + assert.Error(t, job.Init()) // OK case job.Webadmin = "webadmin" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -170,8 +159,8 @@ func TestActiveMQ_Check(t *testing.T) { job.HTTP.Request = web.Request{URL: ts.URL} job.Webadmin = "webadmin" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) } func TestActiveMQ_Charts(t *testing.T) { @@ -203,8 +192,8 @@ func TestActiveMQ_Collect(t *testing.T) { job.HTTP.Request = web.Request{URL: ts.URL} job.Webadmin = "webadmin" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) cases := []struct { expected map[string]int64 @@ -310,8 +299,8 @@ func TestActiveMQ_404(t *testing.T) { job.Webadmin = "webadmin" job.HTTP.Request = web.Request{URL: ts.URL} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestActiveMQ_InvalidData(t 
*testing.T) { @@ -324,6 +313,6 @@ func TestActiveMQ_InvalidData(t *testing.T) { mod.Webadmin = "webadmin" mod.HTTP.Request = web.Request{URL: ts.URL} - require.True(t, mod.Init()) - assert.False(t, mod.Check()) + require.NoError(t, mod.Init()) + assert.Error(t, mod.Check()) } diff --git a/modules/activemq/apiclient.go b/modules/activemq/apiclient.go index 6835fd5aa..0be94fe70 100644 --- a/modules/activemq/apiclient.go +++ b/modules/activemq/apiclient.go @@ -5,11 +5,12 @@ package activemq import ( "encoding/xml" "fmt" - "github.com/netdata/go.d.plugin/pkg/web" "io" "net/http" "net/url" "path" + + "github.com/netdata/go.d.plugin/pkg/web" ) type topics struct { @@ -104,7 +105,7 @@ func (a *apiClient) getTopics() (*topics, error) { return &topics, nil } -func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { +func (a *apiClient) doRequestOK(req *http.Request) (*http.Response, error) { resp, err := a.httpClient.Do(req) if err != nil { return resp, fmt.Errorf("error on request to %s : %v", req.URL, err) @@ -117,7 +118,7 @@ func (a apiClient) doRequestOK(req *http.Request) (*http.Response, error) { return resp, err } -func (a apiClient) createRequest(urlPath string) (*http.Request, error) { +func (a *apiClient) createRequest(urlPath string) (*http.Request, error) { req := a.request.Copy() u, err := url.Parse(req.URL) if err != nil { diff --git a/modules/activemq/collect.go b/modules/activemq/collect.go new file mode 100644 index 000000000..0dbaf5544 --- /dev/null +++ b/modules/activemq/collect.go @@ -0,0 +1,185 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "fmt" + "strings" +) + +const ( + keyQueues = "queues" + keyTopics = "topics" + keyAdvisory = "Advisory" +) + +var nameReplacer = strings.NewReplacer(".", "_", " ", "") + +func (a *ActiveMQ) collect() (map[string]int64, error) { + metrics := make(map[string]int64) + + var ( + queues *queues + topics *topics + err error + ) + + if queues, err = a.apiClient.getQueues(); err != nil { + return nil, err + } + + if topics, err = a.apiClient.getTopics(); err != nil { + return nil, err + } + + a.processQueues(queues, metrics) + a.processTopics(topics, metrics) + + return metrics, nil +} + +func (a *ActiveMQ) processQueues(queues *queues, metrics map[string]int64) { + var ( + count = len(a.activeQueues) + updated = make(map[string]bool) + unp int + ) + + for _, q := range queues.Items { + if strings.Contains(q.Name, keyAdvisory) { + continue + } + + if !a.activeQueues[q.Name] { + if a.MaxQueues != 0 && count > a.MaxQueues { + unp++ + continue + } + + if !a.filterQueues(q.Name) { + continue + } + + a.activeQueues[q.Name] = true + a.addQueueTopicCharts(q.Name, keyQueues) + } + + rname := nameReplacer.Replace(q.Name) + + metrics["queues_"+rname+"_consumers"] = q.Stats.ConsumerCount + metrics["queues_"+rname+"_enqueued"] = q.Stats.EnqueueCount + metrics["queues_"+rname+"_dequeued"] = q.Stats.DequeueCount + metrics["queues_"+rname+"_unprocessed"] = q.Stats.EnqueueCount - q.Stats.DequeueCount + + updated[q.Name] = true + } + + for name := range a.activeQueues { + if !updated[name] { + delete(a.activeQueues, name) + a.removeQueueTopicCharts(name, keyQueues) + } + } + + if unp > 0 { + a.Debugf("%d queues were unprocessed due to max_queues limit (%d)", unp, a.MaxQueues) + } +} + +func (a *ActiveMQ) processTopics(topics *topics, metrics map[string]int64) { + var ( + count = len(a.activeTopics) + updated = make(map[string]bool) + unp int + ) + + for _, t := range topics.Items { + if strings.Contains(t.Name, 
keyAdvisory) { + continue + } + + if !a.activeTopics[t.Name] { + if a.MaxTopics != 0 && count > a.MaxTopics { + unp++ + continue + } + + if !a.filterTopics(t.Name) { + continue + } + + a.activeTopics[t.Name] = true + a.addQueueTopicCharts(t.Name, keyTopics) + } + + rname := nameReplacer.Replace(t.Name) + + metrics["topics_"+rname+"_consumers"] = t.Stats.ConsumerCount + metrics["topics_"+rname+"_enqueued"] = t.Stats.EnqueueCount + metrics["topics_"+rname+"_dequeued"] = t.Stats.DequeueCount + metrics["topics_"+rname+"_unprocessed"] = t.Stats.EnqueueCount - t.Stats.DequeueCount + + updated[t.Name] = true + } + + for name := range a.activeTopics { + if !updated[name] { + // TODO: delete after timeout? + delete(a.activeTopics, name) + a.removeQueueTopicCharts(name, keyTopics) + } + } + + if unp > 0 { + a.Debugf("%d topics were unprocessed due to max_topics limit (%d)", unp, a.MaxTopics) + } +} + +func (a *ActiveMQ) filterQueues(line string) bool { + if a.queuesFilter == nil { + return true + } + return a.queuesFilter.MatchString(line) +} + +func (a *ActiveMQ) filterTopics(line string) bool { + if a.topicsFilter == nil { + return true + } + return a.topicsFilter.MatchString(line) +} + +func (a *ActiveMQ) addQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + charts := charts.Copy() + + for _, chart := range *charts { + chart.ID = fmt.Sprintf(chart.ID, typ, rname) + chart.Title = fmt.Sprintf(chart.Title, name) + chart.Fam = typ + + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, typ, rname) + } + } + + _ = a.charts.Add(*charts...) + +} + +func (a *ActiveMQ) removeQueueTopicCharts(name, typ string) { + rname := nameReplacer.Replace(name) + + chart := a.charts.Get(fmt.Sprintf("%s_%s_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_unprocessed_messages", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() + + chart = a.charts.Get(fmt.Sprintf("%s_%s_consumers", typ, rname)) + chart.MarkRemove() + chart.MarkNotCreated() +} diff --git a/modules/activemq/config_schema.json b/modules/activemq/config_schema.json index abefb5d2f..c525f56a1 100644 --- a/modules/activemq/config_schema.json +++ b/modules/activemq/config_schema.json @@ -1,75 +1,303 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/activemq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ActiveMQ collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "filtering", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "filtering" + } + }, + "allOf": [ + { + "$ref": "#/definitions/filteringSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": 
{ + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the ActiveMQ Web Console API.", + "type": "string", + "default": "http://127.0.0.1:8161" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "webadmin": { + "title": "Webadmin path", + "description": "Webadmin root path.", + "type": "string", + "default": "admin" + } + }, + "required": [ + "url", + "webadmin" + ] + }, + "filteringSectionConfig": { + "type": "object", + "properties": { + "max_queues": { + "title": "Queue limit", + "description": "The maximum number of concurrently collected queues.", + "type": "integer", + "minimum": 0, + "default": 50 + }, + "queues_filter": { + "title": "Queue selector", + "description": "Queues matching the selector will be monitored. Patterns follow the syntax of Netdata simple patterns.", + "type": "string", + "minimum": 1, + "default": "*" + }, + "max_topics": { + "title": "Topic limit", + "description": "The maximum number of concurrently collected queues.", + "type": "integer", + "minimum": 0, + "default": 50 + }, + "topics_filter": { + "title": "Topic selector", + "description": "Topics matching the selector will be monitored. 
Patterns follow the syntax of Netdata simple patterns.", + "type": "string", + "minimum": 1, + "default": "*" + } + } + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "webadmin": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, "max_queues": { - "type": "integer" + "ui:help": "Setting the value to 0 removes the limit, which can cause additional overhead for both Netdata and the host system if there are a lot of queues." }, "max_topics": { - "type": "integer" - }, - "queues_filter": { - "type": "string" - }, - "topics_filter": { - "type": "string" - }, - "username": { - "type": "string" + "ui:help": "Setting the value to 0 removes the limit, which can cause additional overhead for both Netdata and the host system if there are a lot of topics." 
}, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url", - "webadmin" - ] + } } diff --git a/modules/activemq/init.go b/modules/activemq/init.go new file mode 100644 index 000000000..920f0dd62 --- /dev/null +++ b/modules/activemq/init.go @@ -0,0 +1,32 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package activemq + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (a *ActiveMQ) validateConfig() error { + if a.URL == "" { + return errors.New("url not set") + } + if a.Webadmin == "" { + return errors.New("webadmin root path set") + } + return nil +} + +func (a *ActiveMQ) initQueuesFiler() (matcher.Matcher, error) { + if a.QueuesFilter == "" { + return matcher.TRUE(), nil + } + return matcher.NewSimplePatternsMatcher(a.QueuesFilter) +} + +func (a *ActiveMQ) initTopicsFilter() (matcher.Matcher, error) { + if a.TopicsFilter == "" { + return matcher.TRUE(), nil + } + return matcher.NewSimplePatternsMatcher(a.TopicsFilter) +} diff --git a/modules/apache/apache.go b/modules/apache/apache.go index 8b117463d..8c925f9b0 100644 --- a/modules/apache/apache.go +++ b/modules/apache/apache.go @@ -4,6 +4,7 @@ package apache import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -30,7 +31,7 @@ func New() *Apache { URL: "http://127.0.0.1/server-status?auto", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second), }, }, }, @@ -40,13 +41,14 @@ func New() *Apache { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Apache struct { module.Base - - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts @@ -54,26 +56,40 @@ type Apache struct { once *sync.Once } -func (a *Apache) Init() bool { - if err := a.verifyConfig(); err != nil { +func (a *Apache) Configuration() any { + return a.Config +} + +func (a *Apache) Init() error { + if err := a.validateConfig(); err != nil { a.Errorf("config validation: %v", err) - return false + return err } httpClient, err := a.initHTTPClient() if err != nil { a.Errorf("init HTTP client: %v", err) - return false + return err } a.httpClient = httpClient a.Debugf("using URL %s", a.URL) - a.Debugf("using timeout: %s", a.Timeout.Duration) - return true + a.Debugf("using timeout: %s", a.Timeout) + + return nil } -func (a *Apache) Check() bool { - return len(a.Collect()) > 0 +func (a *Apache) Check() error { + mx, err := a.collect() + if err != nil { + a.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (a *Apache) Charts() *module.Charts { diff --git a/modules/apache/apache_test.go b/modules/apache/apache_test.go index a507113f3..9b9ec5575 100644 --- a/modules/apache/apache_test.go +++ b/modules/apache/apache_test.go @@ -66,9 +66,9 @@ func TestApache_Init(t *testing.T) { apache.Config = test.config if test.wantFail { - assert.False(t, apache.Init()) + assert.Error(t, apache.Init()) } 
else { - assert.True(t, apache.Init()) + assert.NoError(t, apache.Init()) } }) } @@ -115,9 +115,9 @@ func TestApache_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, apache.Check()) + assert.Error(t, apache.Check()) } else { - assert.True(t, apache.Check()) + assert.NoError(t, apache.Check()) } }) } @@ -255,7 +255,7 @@ func caseMPMEventSimpleStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -268,7 +268,7 @@ func caseMPMEventExtendedStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -281,7 +281,7 @@ func caseMPMPreforkExtendedStatus(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -294,7 +294,7 @@ func caseLighttpdResponse(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -307,7 +307,7 @@ func caseInvalidDataResponse(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } @@ -316,7 +316,7 @@ func caseConnectionRefused(t *testing.T) (*Apache, func()) { t.Helper() apache := New() apache.URL = "http://127.0.0.1:65001/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, func() {} } @@ -329,7 +329,7 @@ func case404(t *testing.T) (*Apache, func()) { })) apache := New() apache.URL = srv.URL + "/server-status?auto" - require.True(t, apache.Init()) + require.NoError(t, apache.Init()) return apache, srv.Close } diff --git a/modules/apache/config_schema.json b/modules/apache/config_schema.json index 81ece2b67..e33b0500a 100644 --- a/modules/apache/config_schema.json +++ b/modules/apache/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/apache job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Apache/HTTPd collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + 
"properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Apache status page to monitor.", + "type": "string", + "default": "http://127.0.0.1/server-status?auto" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + 
"uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/apache/init.go b/modules/apache/init.go index 355999770..8c4699cc1 100644 --- a/modules/apache/init.go +++ b/modules/apache/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (a Apache) verifyConfig() error { +func (a *Apache) validateConfig() error { if a.URL == "" { return errors.New("url not set") } @@ -20,6 +20,6 @@ func (a Apache) verifyConfig() error { return nil } -func (a Apache) initHTTPClient() (*http.Client, error) { +func (a *Apache) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(a.Client) } diff --git a/modules/bind/bind.go b/modules/bind/bind.go index bcca0204e..e4150833f 100644 --- a/modules/bind/bind.go +++ b/modules/bind/bind.go @@ -4,8 +4,8 @@ package bind import ( _ "embed" - "fmt" - "strings" + "errors" + "net/http" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -24,286 +24,113 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:8653/json/v1" - defaultHTTPTimeout = time.Second * 2 -) - -// New creates Bind with default values. func New() *Bind { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Bind{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8653/json/v1", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &Bind{ - Config: config, charts: &Charts{}, } } -type bindAPIClient interface { - serverStats() (*serverStats, error) -} - -// Config is the Bind module configuration. type Config struct { - web.HTTP `yaml:",inline"` - PermitView string `yaml:"permit_view"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` + PermitView string `yaml:"permit_view" json:"permit_view"` } -// Bind Bind module. -type Bind struct { - module.Base - Config `yaml:",inline"` +type ( + Bind struct { + module.Base + Config `yaml:",inline" json:",inline"` - bindAPIClient - permitView matcher.Matcher - charts *Charts -} + charts *Charts -// Cleanup makes cleanup. -func (Bind) Cleanup() {} + permitView matcher.Matcher -// Init makes initialization. 
-func (b *Bind) Init() bool { - if b.URL == "" { - b.Error("URL not set") - return false + httpClient *http.Client + bindAPIClient } - client, err := web.NewHTTPClient(b.Client) + bindAPIClient interface { + serverStats() (*serverStats, error) + } +) + +func (b *Bind) Configuration() any { + return b.Config +} + +func (b *Bind) Init() error { + if err := b.validateConfig(); err != nil { + b.Errorf("config verification: %v", err) + return err + } + + pvm, err := b.initPermitViewMatcher() if err != nil { - b.Errorf("error on creating http client : %v", err) - return false + b.Error(err) + return err + } + if pvm != nil { + b.permitView = pvm } - switch { - case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ - b.bindAPIClient = newXML3Client(client, b.Request) - case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ - b.bindAPIClient = newJSONClient(client, b.Request) - default: - b.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) - return false + httpClient, err := web.NewHTTPClient(b.Client) + if err != nil { + b.Errorf("creating http client : %v", err) + return err } + b.httpClient = httpClient - if b.PermitView != "" { - m, err := matcher.NewSimplePatternsMatcher(b.PermitView) - if err != nil { - b.Errorf("error on creating permitView matcher : %v", err) - return false - } - b.permitView = matcher.WithCache(m) + bindClient, err := b.initBindApiClient(httpClient) + if err != nil { + b.Error(err) + return err } + b.bindAPIClient = bindClient - return true + return nil } -// Check makes check. -func (b *Bind) Check() bool { - return len(b.Collect()) > 0 +func (b *Bind) Check() error { + mx, err := b.collect() + if err != nil { + b.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } -// Charts creates Charts. -func (b Bind) Charts() *Charts { +func (b *Bind) Charts() *Charts { return b.charts } -// Collect collects metrics. 
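The filter options in these modules (queues_filter, topics_filter, permit_view) are Netdata simple patterns compiled through pkg/matcher, which the new init helpers wrap. A small sketch of the compile-and-match flow, using an arbitrary pattern string:

package main

import (
	"fmt"

	"github.com/netdata/go.d.plugin/pkg/matcher"
)

func main() {
	// Space-separated simple patterns; "*" is a wildcard.
	m, err := matcher.NewSimplePatternsMatcher("sales_* orders_*")
	if err != nil {
		panic(err)
	}
	// WithCache memoizes results, useful when the same names repeat every collection cycle.
	m = matcher.WithCache(m)

	for _, name := range []string{"sales_eu", "orders_us", "billing"} {
		fmt.Println(name, "->", m.MatchString(name))
	}
}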
func (b *Bind) Collect() map[string]int64 { - metrics := make(map[string]int64) + mx, err := b.collect() - s, err := b.serverStats() if err != nil { b.Error(err) return nil } - b.collectServerStats(metrics, s) - return metrics + return mx } -func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) { - var chart *Chart - - for k, v := range stats.NSStats { - var ( - algo = module.Incremental - dimName = k - chartID string - ) - switch { - default: - continue - case k == "RecursClients": - dimName = "clients" - chartID = keyRecursiveClients - algo = module.Absolute - case k == "Requestv4": - dimName = "IPv4" - chartID = keyReceivedRequests - case k == "Requestv6": - dimName = "IPv6" - chartID = keyReceivedRequests - case k == "QryFailure": - dimName = "failures" - chartID = keyQueryFailures - case k == "QryUDP": - dimName = "UDP" - chartID = keyProtocolsQueries - case k == "QryTCP": - dimName = "TCP" - chartID = keyProtocolsQueries - case k == "QrySuccess": - dimName = "queries" - chartID = keyQueriesSuccess - case strings.HasSuffix(k, "QryRej"): - chartID = keyQueryFailuresDetail - case strings.HasPrefix(k, "Qry"): - chartID = keyQueriesAnalysis - case strings.HasPrefix(k, "Update"): - chartID = keyReceivedUpdates - } - - if !b.charts.Has(chartID) { - _ = b.charts.Add(charts[chartID].Copy()) - } - - chart = b.charts.Get(chartID) - - if !chart.HasDim(k) { - _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo}) - chart.MarkNotCreated() - } - - delete(stats.NSStats, k) - metrics[k] = v - } - - for _, v := range []struct { - item map[string]int64 - chartID string - }{ - {item: stats.NSStats, chartID: keyNSStats}, - {item: stats.OpCodes, chartID: keyInOpCodes}, - {item: stats.QTypes, chartID: keyInQTypes}, - {item: stats.SockStats, chartID: keyInSockStats}, - } { - if len(v.item) == 0 { - continue - } - - if !b.charts.Has(v.chartID) { - _ = b.charts.Add(charts[v.chartID].Copy()) - } - - chart = b.charts.Get(v.chartID) - - for key, val := range v.item { - if !chart.HasDim(key) { - _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental}) - chart.MarkNotCreated() - } - - metrics[key] = val - } - } - - if !(b.permitView != nil && len(stats.Views) > 0) { - return - } - - for name, view := range stats.Views { - if !b.permitView.MatchString(name) { - continue - } - r := view.Resolver - - delete(r.Stats, "BucketSize") - - for key, val := range r.Stats { - var ( - algo = module.Incremental - dimName = key - chartKey string - ) - - switch { - default: - chartKey = keyResolverStats - case key == "NumFetch": - chartKey = keyResolverNumFetch - dimName = "queries" - algo = module.Absolute - case strings.HasPrefix(key, "QryRTT"): - // TODO: not ordered - chartKey = keyResolverRTT - } - - chartID := fmt.Sprintf(chartKey, name) - - if !b.charts.Has(chartID) { - chart = charts[chartKey].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - } - - chart = b.charts.Get(chartID) - dimID := fmt.Sprintf("%s_%s", name, key) - - if !chart.HasDim(dimID) { - _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo}) - chart.MarkNotCreated() - } - - metrics[dimID] = val - } - - if len(r.QTypes) > 0 { - chartID := fmt.Sprintf(keyResolverInQTypes, name) - - if !b.charts.Has(chartID) { - chart = charts[keyResolverInQTypes].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - } - - chart = b.charts.Get(chartID) - - for key, val := range r.QTypes { - dimID := fmt.Sprintf("%s_%s", name, key) - if 
!chart.HasDim(dimID) { - _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental}) - chart.MarkNotCreated() - } - metrics[dimID] = val - } - } - - if len(r.CacheStats) > 0 { - chartID := fmt.Sprintf(keyResolverCacheHits, name) - - if !b.charts.Has(chartID) { - chart = charts[keyResolverCacheHits].Copy() - chart.ID = chartID - chart.Fam = fmt.Sprintf(chart.Fam, name) - _ = b.charts.Add(chart) - for _, dim := range chart.Dims { - dim.ID = fmt.Sprintf(dim.ID, name) - } - } - - metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"] - metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"] - } +func (b *Bind) Cleanup() { + if b.httpClient != nil { + b.httpClient.CloseIdleConnections() } } diff --git a/modules/bind/bind_test.go b/modules/bind/bind_test.go index 65ff36af0..21b6290b9 100644 --- a/modules/bind/bind_test.go +++ b/modules/bind/bind_test.go @@ -17,28 +17,18 @@ var ( xmlServerData, _ = os.ReadFile("testdata/query-server.xml") ) -func TestNew(t *testing.T) { - job := New() - assert.IsType(t, (*Bind)(nil), job) - assert.NotNil(t, job.charts) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestBind_Cleanup(t *testing.T) { New().Cleanup() } func TestBind_Init(t *testing.T) { // OK job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.bindAPIClient) //NG job = New() job.URL = "" - assert.False(t, job.Init()) - job.URL = defaultURL[:len(defaultURL)-1] - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestBind_Check(t *testing.T) { @@ -54,19 +44,21 @@ func TestBind_Check(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) } func TestBind_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/xml/v3" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } -func TestBind_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } +func TestBind_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} func TestBind_CollectJSON(t *testing.T) { ts := httptest.NewServer( @@ -82,8 +74,8 @@ func TestBind_CollectJSON(t *testing.T) { job.URL = ts.URL + "/json/v1" job.PermitView = "*" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "_default_Queryv4": 4503685324, @@ -259,8 +251,8 @@ func TestBind_CollectXML3(t *testing.T) { job.PermitView = "*" job.URL = ts.URL + "/xml/v3" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "_bind_CookieClientOk": 0, @@ -504,8 +496,8 @@ func TestBind_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestBind_404(t *testing.T) { @@ -514,6 +506,6 @@ func TestBind_404(t *testing.T) { job := New() job.URL = ts.URL + "/json/v1" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/bind/collect.go b/modules/bind/collect.go new file mode 100644 index 000000000..cd10634b0 --- /dev/null +++ b/modules/bind/collect.go @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: 
GPL-3.0-or-later + +package bind + +import ( + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/agent/module" +) + +func (b *Bind) collect() (map[string]int64, error) { + mx := make(map[string]int64) + + s, err := b.serverStats() + if err != nil { + return nil, err + } + b.collectServerStats(mx, s) + + return mx, nil +} + +func (b *Bind) collectServerStats(metrics map[string]int64, stats *serverStats) { + var chart *Chart + + for k, v := range stats.NSStats { + var ( + algo = module.Incremental + dimName = k + chartID string + ) + switch { + default: + continue + case k == "RecursClients": + dimName = "clients" + chartID = keyRecursiveClients + algo = module.Absolute + case k == "Requestv4": + dimName = "IPv4" + chartID = keyReceivedRequests + case k == "Requestv6": + dimName = "IPv6" + chartID = keyReceivedRequests + case k == "QryFailure": + dimName = "failures" + chartID = keyQueryFailures + case k == "QryUDP": + dimName = "UDP" + chartID = keyProtocolsQueries + case k == "QryTCP": + dimName = "TCP" + chartID = keyProtocolsQueries + case k == "QrySuccess": + dimName = "queries" + chartID = keyQueriesSuccess + case strings.HasSuffix(k, "QryRej"): + chartID = keyQueryFailuresDetail + case strings.HasPrefix(k, "Qry"): + chartID = keyQueriesAnalysis + case strings.HasPrefix(k, "Update"): + chartID = keyReceivedUpdates + } + + if !b.charts.Has(chartID) { + _ = b.charts.Add(charts[chartID].Copy()) + } + + chart = b.charts.Get(chartID) + + if !chart.HasDim(k) { + _ = chart.AddDim(&Dim{ID: k, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + delete(stats.NSStats, k) + metrics[k] = v + } + + for _, v := range []struct { + item map[string]int64 + chartID string + }{ + {item: stats.NSStats, chartID: keyNSStats}, + {item: stats.OpCodes, chartID: keyInOpCodes}, + {item: stats.QTypes, chartID: keyInQTypes}, + {item: stats.SockStats, chartID: keyInSockStats}, + } { + if len(v.item) == 0 { + continue + } + + if !b.charts.Has(v.chartID) { + _ = b.charts.Add(charts[v.chartID].Copy()) + } + + chart = b.charts.Get(v.chartID) + + for key, val := range v.item { + if !chart.HasDim(key) { + _ = chart.AddDim(&Dim{ID: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + + metrics[key] = val + } + } + + if !(b.permitView != nil && len(stats.Views) > 0) { + return + } + + for name, view := range stats.Views { + if !b.permitView.MatchString(name) { + continue + } + r := view.Resolver + + delete(r.Stats, "BucketSize") + + for key, val := range r.Stats { + var ( + algo = module.Incremental + dimName = key + chartKey string + ) + + switch { + default: + chartKey = keyResolverStats + case key == "NumFetch": + chartKey = keyResolverNumFetch + dimName = "queries" + algo = module.Absolute + case strings.HasPrefix(key, "QryRTT"): + // TODO: not ordered + chartKey = keyResolverRTT + } + + chartID := fmt.Sprintf(chartKey, name) + + if !b.charts.Has(chartID) { + chart = charts[chartKey].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + dimID := fmt.Sprintf("%s_%s", name, key) + + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: dimName, Algo: algo}) + chart.MarkNotCreated() + } + + metrics[dimID] = val + } + + if len(r.QTypes) > 0 { + chartID := fmt.Sprintf(keyResolverInQTypes, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverInQTypes].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + } + + chart = b.charts.Get(chartID) + + for key, 
val := range r.QTypes { + dimID := fmt.Sprintf("%s_%s", name, key) + if !chart.HasDim(dimID) { + _ = chart.AddDim(&Dim{ID: dimID, Name: key, Algo: module.Incremental}) + chart.MarkNotCreated() + } + metrics[dimID] = val + } + } + + if len(r.CacheStats) > 0 { + chartID := fmt.Sprintf(keyResolverCacheHits, name) + + if !b.charts.Has(chartID) { + chart = charts[keyResolverCacheHits].Copy() + chart.ID = chartID + chart.Fam = fmt.Sprintf(chart.Fam, name) + _ = b.charts.Add(chart) + for _, dim := range chart.Dims { + dim.ID = fmt.Sprintf(dim.ID, name) + } + } + + metrics[name+"_CacheHits"] = r.CacheStats["CacheHits"] + metrics[name+"_CacheMisses"] = r.CacheStats["CacheMisses"] + } + } +} diff --git a/modules/bind/config_schema.json b/modules/bind/config_schema.json index 042f47a1a..0d77a6b95 100644 --- a/modules/bind/config_schema.json +++ b/modules/bind/config_schema.json @@ -1,21 +1,244 @@ { - "$id": "https://example.com/person.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Bind collector job configuration", - "type": "object", - "properties": { - "firstName": { - "type": "string", - "description": "The person's first name." + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Bind collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "lastName": { - "type": "string", - "description": "The person's last name." + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "age": { - "description": "Age in years which must be equal to or greater than zero.", - "type": "integer", - "minimum": 0 + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Bind json v1 endpoint.", + "type": "string", + "default": "http://127.0.0.1:8653/json/v1" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + 
"description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true + }, + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } + }, + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
+ }, + "password": { + "ui:widget": "password" + }, + "proxy_password": { + "ui:widget": "password" } } } diff --git a/modules/bind/init.go b/modules/bind/init.go new file mode 100644 index 000000000..daffe29bd --- /dev/null +++ b/modules/bind/init.go @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package bind + +import ( + "errors" + "fmt" + "net/http" + "strings" + + "github.com/netdata/go.d.plugin/pkg/matcher" +) + +func (b *Bind) validateConfig() error { + if b.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (b *Bind) initPermitViewMatcher() (matcher.Matcher, error) { + if b.PermitView == "" { + return nil, nil + } + return matcher.NewSimplePatternsMatcher(b.PermitView) +} + +func (b *Bind) initBindApiClient(httpClient *http.Client) (bindAPIClient, error) { + switch { + case strings.HasSuffix(b.URL, "/xml/v3"): // BIND 9.9+ + return newXML3Client(httpClient, b.Request), nil + case strings.HasSuffix(b.URL, "/json/v1"): // BIND 9.10+ + return newJSONClient(httpClient, b.Request), nil + default: + return nil, fmt.Errorf("URL %s is wrong, supported endpoints: `/xml/v3`, `/json/v1`", b.URL) + } +} diff --git a/modules/cassandra/cassandra.go b/modules/cassandra/cassandra.go index 1e745fbd8..a5702c421 100644 --- a/modules/cassandra/cassandra.go +++ b/modules/cassandra/cassandra.go @@ -4,6 +4,7 @@ package cassandra import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,7 +33,7 @@ func New() *Cassandra { URL: "http://127.0.0.1:7072/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -43,12 +44,13 @@ func New() *Cassandra { } type Config struct { - web.HTTP `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` } type Cassandra struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts @@ -58,24 +60,37 @@ type Cassandra struct { mx *cassandraMetrics } -func (c *Cassandra) Init() bool { +func (c *Cassandra) Configuration() any { + return c.Config +} + +func (c *Cassandra) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("error on validating config: %v", err) - return false + return err } prom, err := c.initPrometheusClient() if err != nil { c.Errorf("error on init prometheus client: %v", err) - return false + return err } c.prom = prom - return true + return nil } -func (c *Cassandra) Check() bool { - return len(c.Collect()) > 0 +func (c *Cassandra) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Cassandra) Charts() *module.Charts { @@ -94,4 +109,8 @@ func (c *Cassandra) Collect() map[string]int64 { return mx } -func (c *Cassandra) Cleanup() {} +func (c *Cassandra) Cleanup() { + if c.prom != nil && c.prom.HTTPClient() != nil { + c.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/cassandra/cassandra_test.go b/modules/cassandra/cassandra_test.go index 4425de46e..7ef95b292 100644 --- a/modules/cassandra/cassandra_test.go +++ b/modules/cassandra/cassandra_test.go @@ -55,9 +55,9 @@ func TestCassandra_Init(t *testing.T) { c.Config = test.config if test.wantFail { - assert.False(t, c.Init()) + assert.Error(t, c.Init()) } else { - assert.True(t, c.Init()) + assert.NoError(t, c.Init()) } }) } @@ -90,12 +90,12 @@ func TestCassandra_Check(t *testing.T) { c, 
cleanup := test.prepare() defer cleanup() - require.True(t, c.Init()) + require.NoError(t, c.Init()) if test.wantFail { - assert.False(t, c.Check()) + assert.Error(t, c.Check()) } else { - assert.True(t, c.Check()) + assert.NoError(t, c.Check()) } }) } @@ -239,7 +239,7 @@ func TestCassandra_Collect(t *testing.T) { c, cleanup := test.prepare() defer cleanup() - require.True(t, c.Init()) + require.NoError(t, c.Init()) mx := c.Collect() diff --git a/modules/cassandra/config_schema.json b/modules/cassandra/config_schema.json index ff22764ec..0380cee3d 100644 --- a/modules/cassandra/config_schema.json +++ b/modules/cassandra/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/cassandra job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Cassandra collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Cassandra metrics page to monitor.", + "type": "string", + "default": "http://127.0.0.1:7072/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 5 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects 
automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/chrony/chrony.go b/modules/chrony/chrony.go index 9f12325b9..0b043ed83 100644 --- a/modules/chrony/chrony.go +++ b/modules/chrony/chrony.go @@ -4,6 +4,7 @@ package chrony import ( _ "embed" + "errors" "time" "github.com/facebook/time/ntp/chrony" @@ -25,7 +26,7 @@ func New() *Chrony { return &Chrony{ Config: Config{ Address: "127.0.0.1:323", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, charts: charts.Copy(), newClient: newChronyClient, @@ -33,6 +34,8 @@ func New() *Chrony { } type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + Address string `yaml:"address"` Timeout web.Duration `yaml:"timeout"` } @@ -54,17 +57,30 @@ type ( } ) -func (c *Chrony) Init() bool { +func (c *Chrony) Configuration() any { + return c.Config +} + +func (c *Chrony) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("config validation: %v", err) - return false + return err } - return true + return nil } -func (c *Chrony) Check() bool { - return len(c.Collect()) > 0 +func (c *Chrony) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Chrony) Charts() *module.Charts { diff --git a/modules/chrony/chrony_test.go b/modules/chrony/chrony_test.go index a6568b234..95558f0ae 100644 --- a/modules/chrony/chrony_test.go +++ b/modules/chrony/chrony_test.go @@ -35,9 +35,9 @@ func TestChrony_Init(t *testing.T) { c.Config = test.config if test.wantFail { - assert.False(t, c.Init()) + assert.Error(t, c.Init()) } else { - assert.True(t, c.Init()) + assert.NoError(t, c.Init()) } }) } @@ -53,7 +53,7 @@ func TestChrony_Check(t *testing.T) { prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{}) }, }, "tracking: success, activity: fail": { - wantFail: false, + wantFail: true, prepare: func() *Chrony { return prepareChronyWithMock(&mockClient{errOnActivity: true}) }, }, "tracking: fail, activity: success": { @@ -74,12 +74,12 @@ func TestChrony_Check(t *testing.T) { t.Run(name, func(t *testing.T) { c := test.prepare() - require.True(t, c.Init()) + require.NoError(t, c.Init()) if test.wantFail { - assert.False(t, c.Check()) + assert.Error(t, c.Check()) } else { - assert.True(t, c.Check()) + assert.NoError(t, c.Check()) } }) } @@ -100,15 +100,15 @@ func TestChrony_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(c *Chrony) { c.Init() }, + prepare: func(c *Chrony) { _ = c.Init() }, }, "after Check": { wantClose: true, - prepare: func(c *Chrony) { c.Init(); c.Check() }, + prepare: func(c *Chrony) { _ = c.Init(); _ = c.Check() }, }, "after Collect": { wantClose: true, - prepare: func(c *Chrony) { c.Init(); c.Collect() }, + prepare: func(c *Chrony) { _ = c.Init(); _ = c.Collect() }, }, } @@ -197,7 +197,7 @@ func TestChrony_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { c := test.prepare() - require.True(t, c.Init()) + require.NoError(t, c.Init()) _ = c.Check() collected := c.Collect() diff --git a/modules/chrony/client.go b/modules/chrony/client.go index caa219f3b..e850ff239 100644 --- a/modules/chrony/client.go +++ b/modules/chrony/client.go @@ -10,7 +10,7 @@ import ( ) func newChronyClient(c Config) (chronyClient, error) { - 
conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration()) if err != nil { return nil, err } diff --git a/modules/chrony/config_schema.json b/modules/chrony/config_schema.json index 105adaa79..103763708 100644 --- a/modules/chrony/config_schema.json +++ b/modules/chrony/config_schema.json @@ -1,23 +1,36 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/chrony job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Chrony collector configuration.", + "type": "object", + "properties": { + "address": { + "title": "Address", + "description": "Chrony address. The format is IP:PORT.", + "type": "string", + "default": "127.0.0.1:323" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "Connection timeout in seconds.", + "type": "number", + "default": 1 + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/chrony/init.go b/modules/chrony/init.go index 70c8916f2..828112c9d 100644 --- a/modules/chrony/init.go +++ b/modules/chrony/init.go @@ -6,7 +6,7 @@ import ( "errors" ) -func (c Chrony) validateConfig() error { +func (c *Chrony) validateConfig() error { if c.Address == "" { return errors.New("empty 'address'") } diff --git a/modules/cockroachdb/cockroachdb.go b/modules/cockroachdb/cockroachdb.go index 0a862f97e..5d4dd0f83 100644 --- a/modules/cockroachdb/cockroachdb.go +++ b/modules/cockroachdb/cockroachdb.go @@ -13,91 +13,89 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -// DefaultMetricsSampleInterval hard coded to 10 -// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58 -const cockroachDBSamplingInterval = 10 - //go:embed "config_schema.json" var configSchema string +// DefaultMetricsSampleInterval hard coded to 10 +// https://github.com/cockroachdb/cockroach/blob/d5ffbf76fb4c4ef802836529188e4628476879bd/pkg/server/config.go#L56-L58 +const dbSamplingInterval = 10 + func init() { module.Register("cockroachdb", module.Creator{ JobConfigSchema: configSchema, Defaults: module.Defaults{ - UpdateEvery: cockroachDBSamplingInterval, + UpdateEvery: dbSamplingInterval, }, Create: func() module.Module { return New() }, }) } func New() *CockroachDB { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8080/_status/vars", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &CockroachDB{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/_status/vars", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &CockroachDB{ - Config: config, charts: charts.Copy(), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - UpdateEvery int `yaml:"update_every"` - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - CockroachDB struct { - module.Base - Config `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` +} - prom prometheus.Prometheus - 
charts *Charts - } -) +type CockroachDB struct { + module.Base + Config `yaml:",inline" json:",inline"` -func (c *CockroachDB) validateConfig() error { - if c.URL == "" { - return errors.New("URL is not set") - } - return nil + prom prometheus.Prometheus + charts *Charts } -func (c *CockroachDB) initClient() error { - client, err := web.NewHTTPClient(c.Client) - if err != nil { - return err - } - - c.prom = prometheus.New(client, c.Request) - return nil +func (c *CockroachDB) Configuration() any { + return c.Config } -func (c *CockroachDB) Init() bool { +func (c *CockroachDB) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("error on validating config: %v", err) - return false + return err } - if err := c.initClient(); err != nil { - c.Errorf("error on initializing client: %v", err) - return false + + prom, err := c.initPrometheusClient() + if err != nil { + c.Error(err) + return err } - if c.UpdateEvery < cockroachDBSamplingInterval { + c.prom = prom + + if c.UpdateEvery < dbSamplingInterval { c.Warningf("'update_every'(%d) is lower then CockroachDB default sampling interval (%d)", - c.UpdateEvery, cockroachDBSamplingInterval) + c.UpdateEvery, dbSamplingInterval) } - return true + + return nil } -func (c *CockroachDB) Check() bool { - return len(c.Collect()) > 0 +func (c *CockroachDB) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *CockroachDB) Charts() *Charts { @@ -116,4 +114,8 @@ func (c *CockroachDB) Collect() map[string]int64 { return mx } -func (CockroachDB) Cleanup() {} +func (c *CockroachDB) Cleanup() { + if c.prom != nil && c.prom.HTTPClient() != nil { + c.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/cockroachdb/cockroachdb_test.go b/modules/cockroachdb/cockroachdb_test.go index 88c307716..f99f60b9b 100644 --- a/modules/cockroachdb/cockroachdb_test.go +++ b/modules/cockroachdb/cockroachdb_test.go @@ -30,36 +30,36 @@ func TestNew(t *testing.T) { func TestCockroachDB_Init(t *testing.T) { cdb := prepareCockroachDB() - assert.True(t, cdb.Init()) + assert.NoError(t, cdb.Init()) } func TestCockroachDB_Init_ReturnsFalseIfConfigURLIsNotSet(t *testing.T) { cdb := prepareCockroachDB() cdb.URL = "" - assert.False(t, cdb.Init()) + assert.Error(t, cdb.Init()) } func TestCockroachDB_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { cdb := prepareCockroachDB() cdb.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, cdb.Init()) + assert.Error(t, cdb.Init()) } func TestCockroachDB_Check(t *testing.T) { cdb, srv := prepareClientServer(t) defer srv.Close() - assert.True(t, cdb.Check()) + assert.NoError(t, cdb.Check()) } func TestCockroachDB_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { cdb := New() cdb.URL = "http://127.0.0.1:38001/metrics" - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) - assert.False(t, cdb.Check()) + assert.Error(t, cdb.Check()) } func TestCockroachDB_Charts(t *testing.T) { @@ -221,7 +221,7 @@ func TestCockroachDB_Collect_ReturnsNilIfNotCockroachDBMetrics(t *testing.T) { func TestCockroachDB_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { cdb := prepareCockroachDB() - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) assert.Nil(t, cdb.Collect()) } @@ -272,7 +272,7 @@ func prepareClientServer(t *testing.T) (*CockroachDB, *httptest.Server) { cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ 
-286,7 +286,7 @@ func prepareClientServerNotCockroachDBMetricResponse(t *testing.T) (*CockroachDB cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ -300,7 +300,7 @@ func prepareClientServerInvalidDataResponse(t *testing.T) (*CockroachDB, *httpte cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } @@ -314,6 +314,6 @@ func prepareClientServerResponse404(t *testing.T) (*CockroachDB, *httptest.Serve cdb := New() cdb.URL = ts.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, ts } diff --git a/modules/cockroachdb/config_schema.json b/modules/cockroachdb/config_schema.json index e732b99f6..7f76b16fa 100644 --- a/modules/cockroachdb/config_schema.json +++ b/modules/cockroachdb/config_schema.json @@ -1,59 +1,247 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/cockroachdb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CockroachDB collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "proxy", + "headers", + "tls", + "all" + ], + "default": "base" + } }, - "proxy_password": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the CockroachDB Prometheus endpoint.", + "type": "string", + "default": "http://127.0.0.1:8080/_status/vars" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + 
"default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "not_follow_redirects": { - "type": "boolean" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_ca": { - "type": "string" + "update_every": { + "ui:help": "Set the interval to exceed 10 seconds to align with CockroachDB's default sampling rate." }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/cockroachdb/init.go b/modules/cockroachdb/init.go new file mode 100644 index 000000000..07986a199 --- /dev/null +++ b/modules/cockroachdb/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package cockroachdb + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (c *CockroachDB) validateConfig() error { + if c.URL == "" { + return errors.New("URL is not set") + } + return nil +} + +func (c *CockroachDB) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(c.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, c.Request), nil +} diff --git a/modules/consul/config_schema.json b/modules/consul/config_schema.json index a71723696..0e9008076 100644 --- a/modules/consul/config_schema.json +++ b/modules/consul/config_schema.json @@ -1,62 +1,253 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/consul job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Consul collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } + }, + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } + }, + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Consul HTTP API.", + "type": "string", + "default": "http://127.0.0.1:8500" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will 
not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "acl_token": { + "title": "X-Consul-Token", + "description": "The token for authentication (if required).", + "type": "string", + "sensitive": true + }, + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "url": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, "timeout": { - "type": [ - "string", - "integer" - ] + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, "acl_token": { - "type": "string" - }, - "username": { - "type": "string" + "ui:widget": "password" }, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/consul/consul.go b/modules/consul/consul.go index ebd10984a..6b2015522 100644 --- a/modules/consul/consul.go +++ b/modules/consul/consul.go @@ -4,6 +4,7 @@ package consul import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -32,8 +33,12 @@ func New() *Consul { return &Consul{ Config: Config{ HTTP: web.HTTP{ - Request: web.Request{URL: "http://127.0.0.1:8500"}, - Client: web.Client{Timeout: web.Duration{Duration: time.Second * 2}}, + Request: web.Request{ + URL: "http://127.0.0.1:8500", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, charts: &module.Charts{}, @@ -44,15 +49,16 @@ func New() *Consul { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` - ACLToken string `yaml:"acl_token"` + web.HTTP `yaml:",inline" json:",inline"` + ACLToken string `yaml:"acl_token" json:"acl_token"` } type Consul struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts addGlobalChartsOnce *sync.Once @@ -69,31 +75,44 @@ type Consul struct { checks map[string]bool } -func (c *Consul) Init() bool { +func (c *Consul) Configuration() any { + return c.Config +} + +func (c *Consul) Init() error { if err := c.validateConfig(); err != nil { c.Errorf("config validation: %v", err) - return false + return err } httpClient, err := c.initHTTPClient() if err != nil { c.Errorf("init HTTP client: %v", err) - return false + return err } c.httpClient = httpClient prom, err := c.initPrometheusClient(httpClient) if err != nil { c.Errorf("init Prometheus client: %v", err) - return false + return err } c.prom = prom - return true + return nil } -func (c *Consul) Check() bool { - return len(c.Collect()) > 0 +func (c *Consul) Check() error { + mx, err := c.collect() + if err != nil { + c.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (c *Consul) Charts() *module.Charts { diff --git a/modules/consul/consul_test.go b/modules/consul/consul_test.go index b8f990893..7d68edd91 100644 --- a/modules/consul/consul_test.go +++ b/modules/consul/consul_test.go @@ -78,9 +78,9 @@ func TestConsul_Init(t *testing.T) { consul.Config = test.config if test.wantFail { - assert.False(t, consul.Init()) + assert.Error(t, consul.Init()) } else { - assert.True(t, consul.Init()) + assert.NoError(t, consul.Init()) } }) } @@ -131,9 +131,9 @@ func TestConsul_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, consul.Check()) + assert.Error(t, consul.Check()) } else { - assert.True(t, consul.Check()) + assert.NoError(t, consul.Check()) } }) } @@ -561,7 +561,7 @@ func caseConsulV1143CloudServerResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, 
consul.Init()) return consul, srv.Close } @@ -589,7 +589,7 @@ func caseConsulV1132ServerResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -617,7 +617,7 @@ func caseConsulV1132ServerWithHostnameResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -643,7 +643,7 @@ func caseConsulV1132ServerWithDisabledPrometheus(t *testing.T) (*Consul, func()) consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -667,7 +667,7 @@ func caseConsulV1132ClientResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -682,7 +682,7 @@ func caseInvalidDataResponse(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } @@ -691,7 +691,7 @@ func caseConnectionRefused(t *testing.T) (*Consul, func()) { t.Helper() consul := New() consul.URL = "http://127.0.0.1:65535/" - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, func() {} } @@ -705,7 +705,7 @@ func case404(t *testing.T) (*Consul, func()) { consul := New() consul.URL = srv.URL - require.True(t, consul.Init()) + require.NoError(t, consul.Init()) return consul, srv.Close } diff --git a/modules/coredns/config_schema.json b/modules/coredns/config_schema.json index 70b9ef001..dd4156d51 100644 --- a/modules/coredns/config_schema.json +++ b/modules/coredns/config_schema.json @@ -1,93 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/coredns job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CoreDNS collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": 
"#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "per_server_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the CoreDNS metrics page to monitor.", + "type": "string", + "default": "http://127.0.0.1:9153/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" } }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true } } - } - }, - "per_zone_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true } - }, - "excludes": { - "type": "array", - "items": { + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", "type": "string" } } } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true } }, - 
"not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/coredns/coredns.go b/modules/coredns/coredns.go index 18c92caf3..bc9a7f9c1 100644 --- a/modules/coredns/coredns.go +++ b/modules/coredns/coredns.go @@ -4,6 +4,7 @@ package coredns import ( _ "embed" + "errors" "time" "github.com/blang/semver/v4" @@ -14,11 +15,6 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -const ( - defaultURL = "http://127.0.0.1:9153/metrics" - defaultHTTPTimeout = time.Second * 2 -) - //go:embed "config_schema.json" var configSchema string @@ -31,18 +27,17 @@ func init() { // New creates CoreDNS with default values. func New() *CoreDNS { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &CoreDNS{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9153/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - return &CoreDNS{ - Config: config, charts: summaryCharts.Copy(), collectedServers: make(map[string]bool), collectedZones: make(map[string]bool), @@ -51,17 +46,22 @@ func New() *CoreDNS { // Config is the CoreDNS module configuration. type Config struct { - web.HTTP `yaml:",inline"` - PerServerStats matcher.SimpleExpr `yaml:"per_server_stats"` - PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` + PerServerStats matcher.SimpleExpr `yaml:"per_server_stats" json:"per_server_stats"` + PerZoneStats matcher.SimpleExpr `yaml:"per_zone_stats" json:"per_zone_stats"` } // CoreDNS CoreDNS module. type CoreDNS struct { module.Base - Config `yaml:",inline"` - charts *Charts - prom prometheus.Prometheus + Config `yaml:",inline" json:",inline"` + + prom prometheus.Prometheus + + charts *Charts + perServerMatcher matcher.Matcher perZoneMatcher matcher.Matcher collectedServers map[string]bool @@ -71,48 +71,57 @@ type CoreDNS struct { metricNames requestMetricsNames } -// Cleanup makes cleanup. -func (CoreDNS) Cleanup() {} +func (cd *CoreDNS) Configuration() any { + return cd.Config +} // Init makes initialization. 
-func (cd *CoreDNS) Init() bool { - if cd.URL == "" { - cd.Error("URL not set") - return false +func (cd *CoreDNS) Init() error { + if err := cd.validateConfig(); err != nil { + cd.Errorf("config validation: %v", err) + return err } - if !cd.PerServerStats.Empty() { - m, err := cd.PerServerStats.Parse() - if err != nil { - cd.Errorf("error on creating 'per_server_stats' matcher : %v", err) - return false - } - cd.perServerMatcher = matcher.WithCache(m) + sm, err := cd.initPerServerMatcher() + if err != nil { + cd.Error(err) + return err } - - if !cd.PerZoneStats.Empty() { - m, err := cd.PerZoneStats.Parse() - if err != nil { - cd.Errorf("error on creating 'per_zone_stats' matcher : %v", err) - return false - } - cd.perZoneMatcher = matcher.WithCache(m) + if sm != nil { + cd.perServerMatcher = sm } - client, err := web.NewHTTPClient(cd.Client) + zm, err := cd.initPerZoneMatcher() if err != nil { - cd.Errorf("error on creating http client : %v", err) - return false + cd.Error(err) + return err + } + if zm != nil { + cd.perZoneMatcher = zm } - cd.prom = prometheus.New(client, cd.Request) + prom, err := cd.initPrometheusClient() + if err != nil { + cd.Error(err) + return err + } + cd.prom = prom - return true + return nil } // Check makes check. -func (cd *CoreDNS) Check() bool { - return len(cd.Collect()) > 0 +func (cd *CoreDNS) Check() error { + mx, err := cd.collect() + if err != nil { + cd.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } // Charts creates Charts. @@ -131,3 +140,10 @@ func (cd *CoreDNS) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (cd *CoreDNS) Cleanup() { + if cd.prom != nil && cd.prom.HTTPClient() != nil { + cd.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/coredns/coredns_test.go b/modules/coredns/coredns_test.go index a6b77976a..3056b3b98 100644 --- a/modules/coredns/coredns_test.go +++ b/modules/coredns/coredns_test.go @@ -20,24 +20,18 @@ var ( testNoLoadNoVersion, _ = os.ReadFile("testdata/no_version/no_load.txt") ) -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*CoreDNS)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestCoreDNS_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } func TestCoreDNS_Cleanup(t *testing.T) { New().Cleanup() } -func TestCoreDNS_Init(t *testing.T) { assert.True(t, New().Init()) } +func TestCoreDNS_Init(t *testing.T) { + assert.NoError(t, New().Init()) +} func TestCoreDNS_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestCoreDNS_Check(t *testing.T) { @@ -60,8 +54,8 @@ func TestCoreDNS_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) }) } } @@ -69,8 +63,8 @@ func TestCoreDNS_Check(t *testing.T) { func TestCoreDNS_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_Collect(t *testing.T) { @@ -95,8 +89,8 @@ func TestCoreDNS_Collect(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, 
job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "coredns.io._request_per_ip_family_v4": 19, @@ -444,8 +438,8 @@ func TestCoreDNS_CollectNoLoad(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "no_matching_zone_dropped_total": 0, @@ -513,8 +507,8 @@ func TestCoreDNS_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_404(t *testing.T) { @@ -527,8 +521,8 @@ func TestCoreDNS_404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestCoreDNS_CollectNoVersion(t *testing.T) { @@ -543,8 +537,8 @@ func TestCoreDNS_CollectNoVersion(t *testing.T) { job.URL = ts.URL + "/metrics" job.PerServerStats.Includes = []string{"glob:*"} job.PerZoneStats.Includes = []string{"glob:*"} - require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) assert.Nil(t, job.Collect()) } diff --git a/modules/coredns/init.go b/modules/coredns/init.go new file mode 100644 index 000000000..79d05926d --- /dev/null +++ b/modules/coredns/init.go @@ -0,0 +1,40 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package coredns + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (cd *CoreDNS) validateConfig() error { + if cd.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (cd *CoreDNS) initPerServerMatcher() (matcher.Matcher, error) { + if cd.PerServerStats.Empty() { + return nil, nil + } + return cd.PerServerStats.Parse() +} + +func (cd *CoreDNS) initPerZoneMatcher() (matcher.Matcher, error) { + if cd.PerZoneStats.Empty() { + return nil, nil + } + return cd.PerZoneStats.Parse() +} + +func (cd *CoreDNS) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(cd.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, cd.Request), nil +} diff --git a/modules/couchbase/config_schema.json b/modules/couchbase/config_schema.json index 307a1261b..b43a78d8c 100644 --- a/modules/couchbase/config_schema.json +++ b/modules/couchbase/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/couchbase job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Couchbase collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { 
- "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Couchbase server management REST API.", + "type": "string", + "default": "http://127.0.0.1:8091" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 5 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + 
"description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/couchbase/couchbase.go b/modules/couchbase/couchbase.go index b92ec2d76..86c06f969 100644 --- a/modules/couchbase/couchbase.go +++ b/modules/couchbase/couchbase.go @@ -4,6 +4,7 @@ package couchbase import ( _ "embed" + "errors" "net/http" "time" @@ -32,7 +33,7 @@ func New() *Couchbase { URL: "http://127.0.0.1:8091", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -40,53 +41,60 @@ func New() *Couchbase { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } - Couchbase struct { - module.Base - Config `yaml:",inline"` +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - httpClient *http.Client - charts *module.Charts - collectedBuckets map[string]bool - } -) + web.HTTP `yaml:",inline" json:",inline"` +} -func (cb *Couchbase) Cleanup() { - if cb.httpClient == nil { - return - } - cb.httpClient.CloseIdleConnections() +type Couchbase struct { + module.Base + Config `yaml:",inline" json:",inline"` + + httpClient *http.Client + charts *module.Charts + collectedBuckets map[string]bool } -func (cb *Couchbase) Init() bool { +func (cb *Couchbase) Configuration() any { + return cb.Config +} + +func (cb *Couchbase) Init() error { err := cb.validateConfig() if err != nil { cb.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := cb.initHTTPClient() if err != nil { cb.Errorf("init HTTP client: %v", err) - return false + return err } cb.httpClient = httpClient charts, err := cb.initCharts() if err != nil { cb.Errorf("init charts: %v", err) - return false + return err } - cb.charts = charts - return true + + return nil } -func (cb *Couchbase) Check() bool { - return len(cb.Collect()) > 0 +func (cb *Couchbase) Check() error { + mx, err := cb.collect() + if err != nil { + cb.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (cb *Couchbase) Charts() *Charts { @@ -104,3 +112,10 @@ func (cb *Couchbase) Collect() map[string]int64 { } return mx } + +func (cb *Couchbase) Cleanup() { + if cb.httpClient == nil { + return + } + cb.httpClient.CloseIdleConnections() +} diff --git a/modules/couchbase/couchbase_test.go b/modules/couchbase/couchbase_test.go index da0fa4e66..565b896ff 100644 --- a/modules/couchbase/couchbase_test.go +++ b/modules/couchbase/couchbase_test.go @@ -67,9 +67,9 @@ func TestCouchbase_Init(t *testing.T) { cb.Config = test.config if test.wantFail { - assert.False(t, cb.Init()) + assert.Error(t, cb.Init()) } else { - 
assert.True(t, cb.Init()) + assert.NoError(t, cb.Init()) } }) } @@ -103,9 +103,9 @@ func TestCouchbase_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, cb.Check()) + assert.Error(t, cb.Check()) } else { - assert.True(t, cb.Check()) + assert.NoError(t, cb.Check()) } }) } @@ -178,7 +178,7 @@ func prepareCouchbaseV660(t *testing.T) (cb *Couchbase, cleanup func()) { cb = New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -191,7 +191,7 @@ func prepareCouchbaseInvalidData(t *testing.T) (*Couchbase, func()) { })) cb := New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -204,7 +204,7 @@ func prepareCouchbase404(t *testing.T) (*Couchbase, func()) { })) cb := New() cb.URL = srv.URL - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, srv.Close } @@ -213,7 +213,7 @@ func prepareCouchbaseConnectionRefused(t *testing.T) (*Couchbase, func()) { t.Helper() cb := New() cb.URL = "http://127.0.0.1:38001" - require.True(t, cb.Init()) + require.NoError(t, cb.Init()) return cb, func() {} } diff --git a/modules/couchbase/init.go b/modules/couchbase/init.go index c274ee572..abb330717 100644 --- a/modules/couchbase/init.go +++ b/modules/couchbase/init.go @@ -24,11 +24,11 @@ func (cb *Couchbase) initCharts() (*Charts, error) { return bucketCharts.Copy(), nil } -func (cb Couchbase) initHTTPClient() (*http.Client, error) { +func (cb *Couchbase) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(cb.Client) } -func (cb Couchbase) validateConfig() error { +func (cb *Couchbase) validateConfig() error { if cb.URL == "" { return errors.New("URL not set") } diff --git a/modules/couchdb/collect.go b/modules/couchdb/collect.go index 9fd041800..27dd33549 100644 --- a/modules/couchdb/collect.go +++ b/modules/couchdb/collect.go @@ -42,7 +42,7 @@ func (cdb *CouchDB) collect() (map[string]int64, error) { return collected, nil } -func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { if !ms.hasNodeStats() { return } @@ -56,7 +56,7 @@ func (CouchDB) collectNodeStats(collected map[string]int64, ms *cdbMetrics) { } } -func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { if !ms.hasNodeSystem() { return } @@ -68,7 +68,7 @@ func (CouchDB) collectSystemStats(collected map[string]int64, ms *cdbMetrics) { collected["peak_msg_queue"] = findMaxMQSize(ms.NodeSystem.MessageQueues) } -func (CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) { +func (cdb *CouchDB) collectActiveTasks(collected map[string]int64, ms *cdbMetrics) { collected["active_tasks_indexer"] = 0 collected["active_tasks_database_compaction"] = 0 collected["active_tasks_replication"] = 0 diff --git a/modules/couchdb/config_schema.json b/modules/couchdb/config_schema.json index e3a67e322..eff51e78e 100644 --- a/modules/couchdb/config_schema.json +++ b/modules/couchdb/config_schema.json @@ -1,65 +1,256 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/couchdb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "CouchDB collector configuration.", + "type": "object", + "properties": 
{ + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "node": { - "type": "string" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the CouchDB web server.", + "type": "string", + "default": "http://127.0.0.1:5984" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 10 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "node": { + "title": "Node name", + "description": "CouchDB node name. 
Same as -name vm.args argument.", + "type": "string", + "default": "_local" + }, + "databases": { + "title": "Databases", + "description": "A space-separated list of database names for which database-specific statistics should be displayed.", + "type": "string" + } + }, + "required": [ + "url", + "node" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "databases": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/couchdb/couchdb.go b/modules/couchdb/couchdb.go index 3342b7b7f..943a3feed 100644 --- a/modules/couchdb/couchdb.go +++ b/modules/couchdb/couchdb.go @@ -4,6 +4,7 @@ package couchdb import ( _ "embed" + "errors" "net/http" "strings" "time" @@ -33,7 +34,7 @@ func New() *CouchDB { URL: "http://127.0.0.1:5984", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, Node: "_local", @@ -41,36 +42,33 @@ func New() *CouchDB { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - Node string `yaml:"node"` - Databases string `yaml:"databases"` - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - CouchDB struct { - module.Base - Config `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` + Node string `yaml:"node" json:"node"` + Databases string `yaml:"databases" json:"databases"` +} - httpClient *http.Client - charts *module.Charts +type CouchDB struct { + module.Base + Config `yaml:",inline" json:",inline"` - databases []string - } -) + httpClient *http.Client + charts *module.Charts -func (cdb *CouchDB) Cleanup() { - if cdb.httpClient == nil { - return - } - cdb.httpClient.CloseIdleConnections() + databases []string +} + +func (cdb *CouchDB) Configuration() any { + return cdb.Config } -func (cdb *CouchDB) Init() bool { +func (cdb *CouchDB) Init() error { err := cdb.validateConfig() if err != nil { cdb.Errorf("check configuration: %v", err) - return false + return err } cdb.databases = strings.Fields(cdb.Config.Databases) @@ -78,26 +76,37 @@ func (cdb *CouchDB) Init() bool { httpClient, err := cdb.initHTTPClient() if err != nil { cdb.Errorf("init HTTP client: %v", err) - return false + return err } cdb.httpClient = httpClient charts, err := cdb.initCharts() if err != nil { cdb.Errorf("init charts: %v", err) - return false + return err } cdb.charts = charts - return true + return nil } -func (cdb *CouchDB) Check() bool { +func (cdb *CouchDB) Check() error { if err := cdb.pingCouchDB(); err != nil { cdb.Error(err) - return false + return err } - return len(cdb.Collect()) > 0 + + mx, err := cdb.collect() + if err != nil { + cdb.Error(err) + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") + } + + return nil } func (cdb *CouchDB) Charts() *Charts { @@ -115,3 +124,10 @@ func (cdb *CouchDB) Collect() map[string]int64 { } return mx } + +func (cdb *CouchDB) Cleanup() { + if cdb.httpClient == nil { + return + } + cdb.httpClient.CloseIdleConnections() +} diff --git a/modules/couchdb/couchdb_test.go b/modules/couchdb/couchdb_test.go index 29b5b64af..d61b33e41 100644 --- a/modules/couchdb/couchdb_test.go +++ b/modules/couchdb/couchdb_test.go @@ -79,9 +79,9 @@ func TestCouchDB_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) 
assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) } }) @@ -105,9 +105,9 @@ func TestCouchDB_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, cdb.Check()) + assert.Error(t, cdb.Check()) } else { - assert.True(t, cdb.Check()) + assert.NoError(t, cdb.Check()) } }) } @@ -387,7 +387,7 @@ func prepareCouchDB(t *testing.T, createCDB func() *CouchDB) (cdb *CouchDB, clea srv := prepareCouchDBEndpoint() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -404,7 +404,7 @@ func prepareCouchDBInvalidData(t *testing.T) (*CouchDB, func()) { })) cdb := New() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -417,7 +417,7 @@ func prepareCouchDB404(t *testing.T) (*CouchDB, func()) { })) cdb := New() cdb.URL = srv.URL - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, srv.Close } @@ -426,7 +426,7 @@ func prepareCouchDBConnectionRefused(t *testing.T) (*CouchDB, func()) { t.Helper() cdb := New() cdb.URL = "http://127.0.0.1:38001" - require.True(t, cdb.Init()) + require.NoError(t, cdb.Init()) return cdb, func() {} } diff --git a/modules/dnsdist/config_schema.json b/modules/dnsdist/config_schema.json index 880190ce2..842de7a7c 100644 --- a/modules/dnsdist/config_schema.json +++ b/modules/dnsdist/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsdist job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DNSDist collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": 
"boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the DNSDist built-in webserver.", + "type": "string", + "default": "http://127.0.0.1:8083" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/dnsdist/dnsdist.go b/modules/dnsdist/dnsdist.go index 0af242534..b43982342 100644 --- a/modules/dnsdist/dnsdist.go +++ b/modules/dnsdist/dnsdist.go @@ -4,6 +4,7 @@ package dnsdist import ( _ "embed" + "errors" "net/http" "time" @@ -24,18 +25,6 @@ func init() { }) } -type Config struct { - web.HTTP `yaml:",inline"` -} - -type DNSdist struct { - module.Base - Config `yaml:",inline"` - - httpClient *http.Client - charts *module.Charts -} - func New() *DNSdist { return &DNSdist{ Config: Config{ @@ -44,39 +33,66 @@ func New() *DNSdist { URL: "http://127.0.0.1:8083", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, } } -func (d *DNSdist) Init() bool { +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` +} + +type DNSdist struct { + module.Base + Config `yaml:",inline" json:",inline"` + + httpClient *http.Client + charts *module.Charts +} + +func (d *DNSdist) Configuration() any { + return d.Config +} + +func (d *DNSdist) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } client, err := d.initHTTPClient() if err != nil { d.Errorf("init HTTP client: %v", err) - return false + return err } d.httpClient = client cs, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = cs - return true + return nil } -func (d *DNSdist) Check() bool { - return len(d.Collect()) > 0 +func (d *DNSdist) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *DNSdist) Charts() *module.Charts { @@ -100,6 +116,5 @@ func (d *DNSdist) Cleanup() { if d.httpClient == nil { return } - d.httpClient.CloseIdleConnections() } diff --git a/modules/dnsdist/dnsdist_test.go b/modules/dnsdist/dnsdist_test.go index 851d99016..3a3265de2 100644 --- a/modules/dnsdist/dnsdist_test.go +++ b/modules/dnsdist/dnsdist_test.go @@ -68,9 +68,9 @@ func Test_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -78,7 +78,7 @@ func Test_Init(t *testing.T) { func Test_Charts(t *testing.T) { dist := New() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) assert.NotNil(t, dist.Charts()) } @@ -113,12 +113,12 @@ func Test_Check(t *testing.T) { t.Run(name, func(t *testing.T) { dist, cleanup := test.prepare() defer cleanup() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) if test.wantFail { - assert.False(t, dist.Check()) + assert.Error(t, dist.Check()) } else { - assert.True(t, dist.Check()) + assert.NoError(t, dist.Check()) } }) } @@ -181,7 +181,7 @@ func Test_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { dist, cleanup := test.prepare() defer cleanup() - require.True(t, dist.Init()) + require.NoError(t, dist.Init()) collected := dist.Collect() diff --git a/modules/dnsdist/init.go b/modules/dnsdist/init.go index d58891681..41c92edc6 100644 --- a/modules/dnsdist/init.go +++ b/modules/dnsdist/init.go @@ -10,7 +10,7 @@ import ( 
"github.com/netdata/go.d.plugin/pkg/web" ) -func (d DNSdist) validateConfig() error { +func (d *DNSdist) validateConfig() error { if d.URL == "" { return errors.New("URL not set") } @@ -22,10 +22,10 @@ func (d DNSdist) validateConfig() error { return nil } -func (d DNSdist) initHTTPClient() (*http.Client, error) { +func (d *DNSdist) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(d.Client) } -func (d DNSdist) initCharts() (*module.Charts, error) { +func (d *DNSdist) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/dnsmasq/config_schema.json b/modules/dnsmasq/config_schema.json index d08819917..63beeea53 100644 --- a/modules/dnsmasq/config_schema.json +++ b/modules/dnsmasq/config_schema.json @@ -1,26 +1,48 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsmasq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Dnsmasq collector configuration.", + "type": "object", + "properties": { + "address": { + "title": "Address", + "description": "Dnsmasq address. The format is IP:PORT.", + "type": "string", + "default": "127.0.0.1:53" + }, + "protocol": { + "title": "Protocol", + "description": "DNS query transport protocol. Supported protocols: udp, tcp, tcp-tls.", + "type": "string", + "enum": [ + "udp", + "tcp", + "tcp-tls" + ], + "default": "udp" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" + }, + "timeout": { + "title": "Timeout", + "description": "Connection timeout in seconds.", + "type": "number", + "default": 1 + } }, - "protocol": { - "type": "string" - }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "address", + "protocol" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/dnsmasq/dnsmasq.go b/modules/dnsmasq/dnsmasq.go index 33e252b09..460d79ee3 100644 --- a/modules/dnsmasq/dnsmasq.go +++ b/modules/dnsmasq/dnsmasq.go @@ -4,6 +4,7 @@ package dnsmasq import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func New() *Dnsmasq { Config: Config{ Protocol: "udp", Address: "127.0.0.1:53", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, newDNSClient: func(network string, timeout time.Duration) dnsClient { @@ -40,6 +41,8 @@ func New() *Dnsmasq { } type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + Protocol string `yaml:"protocol"` Address string `yaml:"address"` Timeout web.Duration `yaml:"timeout"` @@ -61,32 +64,45 @@ type ( } ) -func (d *Dnsmasq) Init() bool { +func (d *Dnsmasq) Configuration() any { + return d.Config +} + +func (d *Dnsmasq) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } client, err := d.initDNSClient() if err != nil { d.Errorf("init DNS client: %v", err) - return false + return err } d.dnsClient = client charts, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = charts - return true + return nil } -func (d *Dnsmasq) Check() bool { - return len(d.Collect()) > 0 +func (d *Dnsmasq) Check() error { + mx, err := d.collect() + if err != nil { + 
d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *Dnsmasq) Charts() *module.Charts { @@ -105,4 +121,4 @@ func (d *Dnsmasq) Collect() map[string]int64 { return ms } -func (Dnsmasq) Cleanup() {} +func (d *Dnsmasq) Cleanup() {} diff --git a/modules/dnsmasq/dnsmasq_test.go b/modules/dnsmasq/dnsmasq_test.go index b4f0bb555..4484388cb 100644 --- a/modules/dnsmasq/dnsmasq_test.go +++ b/modules/dnsmasq/dnsmasq_test.go @@ -54,9 +54,9 @@ func TestDnsmasq_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -83,12 +83,12 @@ func TestDnsmasq_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dnsmasq := test.prepare() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) if test.wantFail { - assert.False(t, dnsmasq.Check()) + assert.Error(t, dnsmasq.Check()) } else { - assert.True(t, dnsmasq.Check()) + assert.NoError(t, dnsmasq.Check()) } }) } @@ -96,7 +96,7 @@ func TestDnsmasq_Check(t *testing.T) { func TestDnsmasq_Charts(t *testing.T) { dnsmasq := New() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) assert.NotNil(t, dnsmasq.Charts()) } @@ -133,7 +133,7 @@ func TestDnsmasq_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dnsmasq := test.prepare() - require.True(t, dnsmasq.Init()) + require.NoError(t, dnsmasq.Init()) collected := dnsmasq.Collect() diff --git a/modules/dnsmasq/init.go b/modules/dnsmasq/init.go index 2ce4790ae..9ceb3ead5 100644 --- a/modules/dnsmasq/init.go +++ b/modules/dnsmasq/init.go @@ -9,7 +9,7 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (d Dnsmasq) validateConfig() error { +func (d *Dnsmasq) validateConfig() error { if d.Address == "" { return errors.New("'address' parameter not set") } @@ -19,11 +19,11 @@ func (d Dnsmasq) validateConfig() error { return nil } -func (d Dnsmasq) initDNSClient() (dnsClient, error) { - return d.newDNSClient(d.Protocol, d.Timeout.Duration), nil +func (d *Dnsmasq) initDNSClient() (dnsClient, error) { + return d.newDNSClient(d.Protocol, d.Timeout.Duration()), nil } -func (d Dnsmasq) initCharts() (*module.Charts, error) { +func (d *Dnsmasq) initCharts() (*module.Charts, error) { return cacheCharts.Copy(), nil } diff --git a/modules/dnsmasq_dhcp/config_schema.json b/modules/dnsmasq_dhcp/config_schema.json index bb9d76813..03b00a321 100644 --- a/modules/dnsmasq_dhcp/config_schema.json +++ b/modules/dnsmasq_dhcp/config_schema.json @@ -1,23 +1,35 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dnsmasq_dhcp job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Dnsmasq DHCP collector configuration.", + "type": "object", + "properties": { + "leases_path": { + "title": "Leases path", + "description": "Path to dnsmasq DHCP leases file.", + "type": "string", + "default": "/var/lib/misc/dnsmasq.leases" + }, + "conf_path": { + "title": "Config path", + "description": "Path to dnsmasq configuration file.", + "type": "string", + "default": "/etc/dnsmasq.conf" + }, + "conf_dir": { + "title": "Config directory path", + "description": "Path to dnsmasq configuration directory.", + "type": "string", + "default": "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new" + } 
}, - "leases_path": { - "type": "string" - }, - "conf_path": { - "type": "string" - }, - "conf_dir": { - "type": "string" - } + "required": [ + "leases_path" + ] }, - "required": [ - "name", - "leases_path" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/dnsmasq_dhcp/dhcp.go b/modules/dnsmasq_dhcp/dhcp.go index ede8a8ee8..55a107e2a 100644 --- a/modules/dnsmasq_dhcp/dhcp.go +++ b/modules/dnsmasq_dhcp/dhcp.go @@ -4,6 +4,7 @@ package dnsmasq_dhcp import ( _ "embed" + "errors" "net" "time" @@ -22,15 +23,13 @@ func init() { } func New() *DnsmasqDHCP { - config := Config{ - // debian defaults - LeasesPath: "/var/lib/misc/dnsmasq.leases", - ConfPath: "/etc/dnsmasq.conf", - ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new", - } - return &DnsmasqDHCP{ - Config: config, + Config: Config{ + // debian defaults + LeasesPath: "/var/lib/misc/dnsmasq.leases", + ConfPath: "/etc/dnsmasq.conf", + ConfDir: "/etc/dnsmasq.d,.dpkg-dist,.dpkg-old,.dpkg-new", + }, charts: charts.Copy(), parseConfigEvery: time.Minute, cacheDHCPRanges: make(map[string]bool), @@ -39,6 +38,8 @@ func New() *DnsmasqDHCP { } type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + LeasesPath string `yaml:"leases_path"` ConfPath string `yaml:"conf_path"` ConfDir string `yaml:"conf_dir"` @@ -63,21 +64,34 @@ type DnsmasqDHCP struct { mx map[string]int64 } -func (d *DnsmasqDHCP) Init() bool { +func (d *DnsmasqDHCP) Configuration() any { + return d.Config +} + +func (d *DnsmasqDHCP) Init() error { if err := d.validateConfig(); err != nil { d.Errorf("config validation: %v", err) - return false + return err } if err := d.checkLeasesPath(); err != nil { d.Errorf("leases path check: %v", err) - return false + return err } - return true + return nil } -func (d *DnsmasqDHCP) Check() bool { - return len(d.Collect()) > 0 +func (d *DnsmasqDHCP) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *DnsmasqDHCP) Charts() *module.Charts { diff --git a/modules/dnsmasq_dhcp/dhcp_test.go b/modules/dnsmasq_dhcp/dhcp_test.go index 9e7693fa9..8de947bc9 100644 --- a/modules/dnsmasq_dhcp/dhcp_test.go +++ b/modules/dnsmasq_dhcp/dhcp_test.go @@ -27,14 +27,14 @@ func TestDnsmasqDHCP_Init(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestDnsmasqDHCP_InitEmptyLeasesPath(t *testing.T) { job := New() job.LeasesPath = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) { @@ -42,7 +42,7 @@ func TestDnsmasqDHCP_InitInvalidLeasesPath(t *testing.T) { job.LeasesPath = testLeasesPath job.LeasesPath += "!" 
- assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) { @@ -51,7 +51,7 @@ func TestDnsmasqDHCP_InitZeroDHCPRanges(t *testing.T) { job.ConfPath = "testdata/dnsmasq3.conf" job.ConfDir = "" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestDnsmasqDHCP_Check(t *testing.T) { @@ -60,8 +60,8 @@ func TestDnsmasqDHCP_Check(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestDnsmasqDHCP_Charts(t *testing.T) { @@ -70,7 +70,7 @@ func TestDnsmasqDHCP_Charts(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.Charts()) } @@ -85,8 +85,8 @@ func TestDnsmasqDHCP_Collect(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "dhcp_range_1230::1-1230::64_allocated_leases": 7, @@ -126,8 +126,8 @@ func TestDnsmasqDHCP_CollectFailedToOpenLeasesPath(t *testing.T) { job.ConfPath = testConfPath job.ConfDir = testConfDir - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) job.LeasesPath = "" assert.Nil(t, job.Collect()) diff --git a/modules/dnsquery/collect.go b/modules/dnsquery/collect.go index 46104e944..a98e37cad 100644 --- a/modules/dnsquery/collect.go +++ b/modules/dnsquery/collect.go @@ -14,7 +14,7 @@ import ( func (d *DNSQuery) collect() (map[string]int64, error) { if d.dnsClient == nil { - d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration) + d.dnsClient = d.newDNSClient(d.Network, d.Timeout.Duration()) } mx := make(map[string]int64) diff --git a/modules/dnsquery/config_schema.json b/modules/dnsquery/config_schema.json index 4a7fa412a..4ea6637a8 100644 --- a/modules/dnsquery/config_schema.json +++ b/modules/dnsquery/config_schema.json @@ -1,48 +1,101 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/dns_query job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "domains": { - "type": "array", - "items": { - "type": "string" - } - }, - "servers": { - "type": "array", - "items": { - "type": "string" - } - }, - "network": { - "type": "string" - }, - "record_type": { - "type": "string" - }, - "record_types": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DNS query collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 5, + "type": "integer" + }, + "timeout": { + "title": "Timeout", + "description": "DNS query timeout.", + "type": "number", + "default": 2 + }, + "domains": { + "title": "Domains", + "description": "Domain or subdomains to query. 
At each iteration, a random domain will be selected from the list.", + "type": "array", + "items": { + "title": "Domain", + "type": "string" + }, + "default": [ + "google.com", + "github.com" + ], + "uniqueItems": true + }, + "servers": { + "title": "Servers", + "description": "List of DNS servers that will be queried.", + "type": "array", + "items": { + "title": "DNS server", + "description": "IP address or hostname of the DNS server.", + "type": "string" + }, + "default": [ + "8.8.8.8" + ], + "uniqueItems": true + }, + "record_types": { + "type": "array", + "items": { + "type": "string", + "enum": [ + "A", + "AAAA", + "CNAME", + "MX", + "NS", + "PTR", + "TXT", + "SOA", + "SPF", + "TXT", + "SRV" + ], + "default": "A" + }, + "default": [ + "A" + ], + "uniqueItems": true + }, + "network": { + "title": "Protocol", + "description": "Network protocol. Supported protocols: udp, tcp, tcp-tls.", + "type": "string", + "enum": [ + "udp", + "tcp", + "tcp-tls" + ], + "default": "udp" + }, + "port": { + "title": "Port", + "description": "DNS server port.", + "type": "integer", + "default": 53 } }, - "port": { - "type": "integer" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "domains", + "servers", + "network" + ] }, - "required": [ - "name", - "domains", - "servers" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/dnsquery/dnsquery.go b/modules/dnsquery/dnsquery.go index dd1cd3c66..b45a53230 100644 --- a/modules/dnsquery/dnsquery.go +++ b/modules/dnsquery/dnsquery.go @@ -28,7 +28,7 @@ func init() { func New() *DNSQuery { return &DNSQuery{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), Network: "udp", RecordTypes: []string{"A"}, Port: 53, @@ -43,13 +43,15 @@ func New() *DNSQuery { } type Config struct { - Domains []string `yaml:"domains"` - Servers []string `yaml:"servers"` - Network string `yaml:"network"` - RecordType string `yaml:"record_type"` - RecordTypes []string `yaml:"record_types"` - Port int `yaml:"port"` - Timeout web.Duration `yaml:"timeout"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + Domains []string `yaml:"domains" json:"domains"` + Servers []string `yaml:"servers" json:"servers"` + Network string `yaml:"network" json:"network"` + RecordType string `yaml:"record_type" json:"record_type"` + RecordTypes []string `yaml:"record_types" json:"record_types"` + Port int `yaml:"port" json:"port"` + Timeout web.Duration `yaml:"timeout" json:"timeout"` } type ( @@ -71,31 +73,35 @@ type ( } ) -func (d *DNSQuery) Init() bool { +func (d *DNSQuery) Configuration() any { + return d.Config +} + +func (d *DNSQuery) Init() error { if err := d.verifyConfig(); err != nil { d.Errorf("config validation: %v", err) - return false + return err } rt, err := d.initRecordTypes() if err != nil { d.Errorf("init record type: %v", err) - return false + return err } d.recordTypes = rt charts, err := d.initCharts() if err != nil { d.Errorf("init charts: %v", err) - return false + return err } d.charts = charts - return true + return nil } -func (d *DNSQuery) Check() bool { - return true +func (d *DNSQuery) Check() error { + return nil } func (d *DNSQuery) Charts() *module.Charts { diff --git a/modules/dnsquery/dnsquery_test.go b/modules/dnsquery/dnsquery_test.go index 5ba841731..7dbd06aca 100644 --- a/modules/dnsquery/dnsquery_test.go +++ b/modules/dnsquery/dnsquery_test.go @@ -32,7 +32,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: 
[]string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "success when using deprecated record_type": { @@ -43,7 +43,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordType: "A", Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail with default": { @@ -58,7 +58,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when servers not set": { @@ -69,7 +69,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when network is invalid": { @@ -80,7 +80,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "gcp", RecordTypes: []string{"A"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, "fail when record_type is invalid": { @@ -91,7 +91,7 @@ func TestDNSQuery_Init(t *testing.T) { Network: "udp", RecordTypes: []string{"B"}, Port: 53, - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, } @@ -102,9 +102,9 @@ func TestDNSQuery_Init(t *testing.T) { dq.Config = test.config if test.wantFail { - assert.False(t, dq.Init()) + assert.Error(t, dq.Init()) } else { - assert.True(t, dq.Init()) + assert.NoError(t, dq.Init()) } }) } @@ -129,12 +129,12 @@ func TestDNSQuery_Check(t *testing.T) { t.Run(name, func(t *testing.T) { dq := test.prepare() - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) if test.wantFail { - assert.False(t, dq.Check()) + assert.Error(t, dq.Check()) } else { - assert.True(t, dq.Check()) + assert.NoError(t, dq.Check()) } }) } @@ -145,7 +145,7 @@ func TestDNSQuery_Charts(t *testing.T) { dq.Domains = []string{"google.com"} dq.Servers = []string{"192.0.2.0", "192.0.2.1"} - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) assert.NotNil(t, dq.Charts()) assert.Len(t, *dq.Charts(), len(dnsChartsTmpl)*len(dq.Servers)) @@ -186,7 +186,7 @@ func TestDNSQuery_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { dq := test.prepare() - require.True(t, dq.Init()) + require.NoError(t, dq.Init()) mx := dq.Collect() diff --git a/modules/docker/collect.go b/modules/docker/collect.go index ceda40671..fe4b6b45e 100644 --- a/modules/docker/collect.go +++ b/modules/docker/collect.go @@ -43,7 +43,7 @@ func (d *Docker) collect() (map[string]int64, error) { } func (d *Docker) collectInfo(mx map[string]int64) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() info, err := d.client.Info(ctx) @@ -59,7 +59,7 @@ func (d *Docker) collectInfo(mx map[string]int64) error { } func (d *Docker) collectImages(mx map[string]int64) error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() images, err := d.client.ImageList(ctx, types.ImageListOptions{}) @@ -106,7 +106,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error { for _, status := range containerHealthStatuses { if err := func() error { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), 
d.Timeout.Duration()) defer cancel() v, err := d.client.ContainerList(ctx, types.ContainerListOptions{ @@ -191,7 +191,7 @@ func (d *Docker) collectContainers(mx map[string]int64) error { } func (d *Docker) negotiateAPIVersion() { - ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), d.Timeout.Duration()) defer cancel() d.client.NegotiateAPIVersion(ctx) diff --git a/modules/docker/config_schema.json b/modules/docker/config_schema.json index b060da819..10417790a 100644 --- a/modules/docker/config_schema.json +++ b/modules/docker/config_schema.json @@ -1,26 +1,42 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/docker job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Docker collector configuration.", + "type": "object", + "properties": { + "address": { + "title": "Address", + "description": "Docker daemon's listening address. When using a TCP socket, the format is: tcp://{ip}:{port}.", + "type": "string", + "default": "unix:///var/run/docker.sock" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" + }, + "timeout": { + "title": "Timeout", + "description": "Connection timeout.", + "type": "number", + "default": 5 + }, + "collect_container_size": { + "title": "Collect container size", + "description": "Collect container writable layer size.", + "type": "boolean", + "default": false + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_container_size": { - "type": "boolean" - } + "required": [ + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/docker/docker.go b/modules/docker/docker.go index 1078de2fb..61ec5529a 100644 --- a/modules/docker/docker.go +++ b/modules/docker/docker.go @@ -5,6 +5,7 @@ package docker import ( "context" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -28,7 +29,7 @@ func New() *Docker { return &Docker{ Config: Config{ Address: docker.DefaultDockerHost, - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), CollectContainerSize: false, }, @@ -41,9 +42,11 @@ func New() *Docker { } type Config struct { - Timeout web.Duration `yaml:"timeout"` - Address string `yaml:"address"` - CollectContainerSize bool `yaml:"collect_container_size"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + Address string `yaml:"address" json:"address"` + Timeout web.Duration `yaml:"timeout" json:"timeout"` + CollectContainerSize bool `yaml:"collect_container_size" json:"collect_container_size"` } type ( @@ -68,12 +71,25 @@ type ( } ) -func (d *Docker) Init() bool { - return true +func (d *Docker) Configuration() any { + return d.Config +} + +func (d *Docker) Init() error { + return nil } -func (d *Docker) Check() bool { - return len(d.Collect()) > 0 +func (d *Docker) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (d *Docker) Charts() *module.Charts { diff --git a/modules/docker/docker_test.go b/modules/docker/docker_test.go index 0a3711b4d..083f1400f 100644 --- 
a/modules/docker/docker_test.go +++ b/modules/docker/docker_test.go @@ -35,9 +35,9 @@ func TestDocker_Init(t *testing.T) { d.Config = test.config if test.wantFail { - assert.False(t, d.Init()) + assert.Error(t, d.Init()) } else { - assert.True(t, d.Init()) + assert.NoError(t, d.Init()) } }) } @@ -58,15 +58,15 @@ func TestDocker_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(d *Docker) { d.Init() }, + prepare: func(d *Docker) { _ = d.Init() }, }, "after Check": { wantClose: true, - prepare: func(d *Docker) { d.Init(); d.Check() }, + prepare: func(d *Docker) { _ = d.Init(); _ = d.Check() }, }, "after Collect": { wantClose: true, - prepare: func(d *Docker) { d.Init(); d.Collect() }, + prepare: func(d *Docker) { _ = d.Init(); d.Collect() }, }, } @@ -136,12 +136,12 @@ func TestDocker_Check(t *testing.T) { t.Run(name, func(t *testing.T) { d := test.prepare() - require.True(t, d.Init()) + require.NoError(t, d.Init()) if test.wantFail { - assert.False(t, d.Check()) + assert.Error(t, d.Check()) } else { - assert.True(t, d.Check()) + assert.NoError(t, d.Check()) } }) } @@ -666,7 +666,7 @@ func TestDocker_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { d := test.prepare() - require.True(t, d.Init()) + require.NoError(t, d.Init()) mx := d.Collect() diff --git a/modules/docker_engine/config_schema.json b/modules/docker_engine/config_schema.json index 2b8505610..25ff5ee53 100644 --- a/modules/docker_engine/config_schema.json +++ b/modules/docker_engine/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/docker_engine job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Apache/HTTPd collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": 
"#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Docker Engine metrics page to monitor.", + "type": "string", + "default": "http://127.0.0.1:9323/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/docker_engine/docker_engine.go b/modules/docker_engine/docker_engine.go index 7c69daa29..48d8b5073 100644 --- a/modules/docker_engine/docker_engine.go +++ b/modules/docker_engine/docker_engine.go @@ -24,69 +24,69 @@ func init() { } func New() *DockerEngine { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:9323/metrics", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &DockerEngine{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:9323/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, } - return &DockerEngine{ - Config: config, - } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } - DockerEngine struct { - module.Base - Config `yaml:",inline"` +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - prom prometheus.Prometheus - isSwarmManager bool - hasContainerStates bool - } -) + web.HTTP `yaml:",inline" json:",inline"` +} -func (de DockerEngine) validateConfig() error { - if de.URL == "" { - return errors.New("URL is not set") - } - return nil +type DockerEngine struct { + module.Base + Config `yaml:",inline" json:",inline"` + + prom prometheus.Prometheus + isSwarmManager bool + hasContainerStates bool +} + +func (de *DockerEngine) Configuration() any { + return de.Config } -func (de *DockerEngine) initClient() error { - client, err := web.NewHTTPClient(de.Client) +func (de *DockerEngine) Init() error { + if err := de.validateConfig(); err != nil { + de.Errorf("config validation: %v", err) + return err + } + + prom, err := de.initPrometheusClient() if err != nil { + de.Error(err) return err } + de.prom = prom - de.prom = prometheus.New(client, de.Request) return nil } -func (de *DockerEngine) Init() bool { - if err := de.validateConfig(); err != nil { - de.Errorf("config validation: %v", err) - return false - } - if err := de.initClient(); err != nil { - de.Errorf("client initialization: %v", err) - return false +func (de *DockerEngine) Check() error { + mx, err := de.collect() + if err != nil { + de.Error(err) + return err } - return true -} + if len(mx) == 0 { + return errors.New("no metrics collected") -func (de *DockerEngine) Check() bool { - return len(de.Collect()) > 0 + } + return nil } -func (de DockerEngine) Charts() *Charts { +func (de *DockerEngine) Charts() *Charts { cs := charts.Copy() if !de.hasContainerStates { if err := cs.Remove("engine_daemon_container_states_containers"); err != nil { @@ -101,6 +101,7 @@ func (de DockerEngine) Charts() *Charts { if err := cs.Add(*swarmManagerCharts.Copy()...); err != nil { de.Warning(err) } + return cs } @@ -117,4 +118,8 @@ func (de *DockerEngine) Collect() map[string]int64 { return mx } -func (DockerEngine) Cleanup() {} +func (de *DockerEngine) Cleanup() { + if de.prom != nil && de.prom.HTTPClient() != nil { + de.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/docker_engine/docker_engine_test.go b/modules/docker_engine/docker_engine_test.go index 7ffc1ce5e..42ffe4625 100644 --- a/modules/docker_engine/docker_engine_test.go +++ b/modules/docker_engine/docker_engine_test.go @@ -64,9 +64,9 @@ func TestDockerEngine_Init(t *testing.T) { dockerEngine.Config = test.config if test.wantFail { - 
assert.False(t, dockerEngine.Init()) + assert.Error(t, dockerEngine.Init()) } else { - assert.True(t, dockerEngine.Init()) + assert.NoError(t, dockerEngine.Init()) } }) } @@ -92,9 +92,9 @@ func TestDockerEngine_Check(t *testing.T) { defer srv.Close() if test.wantFail { - assert.False(t, dockerEngine.Check()) + assert.Error(t, dockerEngine.Check()) } else { - assert.True(t, dockerEngine.Check()) + assert.NoError(t, dockerEngine.Check()) } }) } @@ -115,7 +115,7 @@ func TestDockerEngine_Charts(t *testing.T) { dockerEngine, srv := test.prepare(t) defer srv.Close() - require.True(t, dockerEngine.Check()) + require.NoError(t, dockerEngine.Check()) assert.Len(t, *dockerEngine.Charts(), test.wantNumCharts) }) } @@ -276,7 +276,7 @@ func prepareClientServerV17050CE(t *testing.T) (*DockerEngine, *httptest.Server) dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -290,7 +290,7 @@ func prepareClientServerV18093CE(t *testing.T) (*DockerEngine, *httptest.Server) dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -304,7 +304,7 @@ func prepareClientServerV18093CESwarm(t *testing.T) (*DockerEngine, *httptest.Se dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -318,7 +318,7 @@ func prepareClientServerNonDockerEngine(t *testing.T) (*DockerEngine, *httptest. dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -332,7 +332,7 @@ func prepareClientServerInvalidData(t *testing.T) (*DockerEngine, *httptest.Serv dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -346,7 +346,7 @@ func prepareClientServer404(t *testing.T) (*DockerEngine, *httptest.Server) { dockerEngine := New() dockerEngine.URL = srv.URL - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } @@ -357,7 +357,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*DockerEngine, *httptes dockerEngine := New() dockerEngine.URL = "http://127.0.0.1:38001/metrics" - require.True(t, dockerEngine.Init()) + require.NoError(t, dockerEngine.Init()) return dockerEngine, srv } diff --git a/modules/docker_engine/init.go b/modules/docker_engine/init.go new file mode 100644 index 000000000..b3ceefdea --- /dev/null +++ b/modules/docker_engine/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package docker_engine + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" + + "github.com/netdata/go.d.plugin/pkg/prometheus" +) + +func (de *DockerEngine) validateConfig() error { + if de.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (de *DockerEngine) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(de.Client) + if err != nil { + return nil, err + } + return prometheus.New(client, de.Request), nil +} diff --git a/modules/dockerhub/config_schema.json b/modules/dockerhub/config_schema.json index 1be293e6f..3ce503588 100644 --- a/modules/dockerhub/config_schema.json +++ b/modules/dockerhub/config_schema.json @@ -1,65 +1,254 @@ { - "$schema": 
"http://json-schema.org/draft-07/schema#", - "title": "go.d/dockerhub job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "DockerHub collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "default": "https://hub.docker.com/v2/repositories", + "title": "URL", + "description": "The URL of the DockerHub repositories endpoint.", + "type": "string" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "repositories": { + "title": "Repositories", + "description": "List of repositories to monitor.", + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "required": [ + "url", + "repositories" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + 
"proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "repositories": { - "type": "array", - "items": { - "type": "number" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "repositories" - ] + } } diff --git a/modules/dockerhub/dockerhub.go b/modules/dockerhub/dockerhub.go index 48836a606..ca3148f1e 100644 --- a/modules/dockerhub/dockerhub.go +++ b/modules/dockerhub/dockerhub.go @@ -4,6 +4,7 @@ package dockerhub import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -11,13 +12,6 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -const ( - defaultURL = "https://hub.docker.com/v2/repositories" - defaultHTTPTimeout = time.Second * 2 - - defaultUpdateEvery = 5 -) - //go:embed "config_schema.json" var configSchema string @@ -25,7 +19,7 @@ func init() { module.Register("dockerhub", module.Creator{ JobConfigSchema: configSchema, Defaults: module.Defaults{ - UpdateEvery: defaultUpdateEvery, + UpdateEvery: 5, }, Create: func() module.Module { return New() }, }) @@ -33,66 +27,73 @@ func init() { // New creates DockerHub with default values. func New() *DockerHub { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &DockerHub{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "https://hub.docker.com/v2/repositories", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, } - return &DockerHub{ - Config: config, - } } // Config is the DockerHub module configuration. 
type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` Repositories []string } // DockerHub DockerHub module. type DockerHub struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + client *apiClient } -// Cleanup makes cleanup. -func (DockerHub) Cleanup() {} +func (dh *DockerHub) Configuration() any { + return dh.Config +} // Init makes initialization. -func (dh *DockerHub) Init() bool { - if dh.URL == "" { - dh.Error("URL not set") - return false - } - - if len(dh.Repositories) == 0 { - dh.Error("repositories parameter is not set") - return false +func (dh *DockerHub) Init() error { + if err := dh.validateConfig(); err != nil { + dh.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(dh.Client) + client, err := dh.initApiClient() if err != nil { - dh.Errorf("error on creating http client : %v", err) - return false + dh.Error(err) + return err } - dh.client = newAPIClient(client, dh.Request) + dh.client = client - return true + return nil } // Check makes check. -func (dh DockerHub) Check() bool { - return len(dh.Collect()) > 0 +func (dh *DockerHub) Check() error { + mx, err := dh.collect() + if err != nil { + dh.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } // Charts creates Charts. -func (dh DockerHub) Charts() *Charts { +func (dh *DockerHub) Charts() *Charts { cs := charts.Copy() addReposToCharts(dh.Repositories, cs) return cs @@ -109,3 +110,10 @@ func (dh *DockerHub) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (dh *DockerHub) Cleanup() { + if dh.client != nil && dh.client.httpClient != nil { + dh.client.httpClient.CloseIdleConnections() + } +} diff --git a/modules/dockerhub/dockerhub_test.go b/modules/dockerhub/dockerhub_test.go index 350af1a53..d5e2626a0 100644 --- a/modules/dockerhub/dockerhub_test.go +++ b/modules/dockerhub/dockerhub_test.go @@ -19,16 +19,6 @@ var ( repo3Data, _ = os.ReadFile("testdata/repo3.txt") ) -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*DockerHub)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) - assert.Len(t, job.Repositories, 0) - assert.Nil(t, job.client) -} - func TestDockerHub_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() } @@ -36,11 +26,13 @@ func TestDockerHub_Cleanup(t *testing.T) { New().Cleanup() } func TestDockerHub_Init(t *testing.T) { job := New() job.Repositories = []string{"name/repo"} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.client) } -func TestDockerHub_InitNG(t *testing.T) { assert.False(t, New().Init()) } +func TestDockerHub_InitNG(t *testing.T) { + assert.Error(t, New().Init()) +} func TestDockerHub_Check(t *testing.T) { ts := httptest.NewServer( @@ -60,16 +52,16 @@ func TestDockerHub_Check(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestDockerHub_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, 
job.Init()) + assert.Error(t, job.Check()) } func TestDockerHub_Collect(t *testing.T) { @@ -90,8 +82,8 @@ func TestDockerHub_Collect(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "star_count_user1/name1": 45, @@ -127,8 +119,8 @@ func TestDockerHub_InvalidData(t *testing.T) { job := New() job.URL = ts.URL job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestDockerHub_404(t *testing.T) { @@ -141,6 +133,6 @@ func TestDockerHub_404(t *testing.T) { job := New() job.Repositories = []string{"name1/repo1", "name2/repo2", "name3/repo3"} - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/dockerhub/init.go b/modules/dockerhub/init.go new file mode 100644 index 000000000..17f2e712e --- /dev/null +++ b/modules/dockerhub/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package dockerhub + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (dh *DockerHub) validateConfig() error { + if dh.URL == "" { + return errors.New("url not set") + } + if len(dh.Repositories) == 0 { + return errors.New("repositories not set") + } + return nil +} + +func (dh *DockerHub) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(dh.Client) + if err != nil { + return nil, err + } + return newAPIClient(client, dh.Request), nil +} diff --git a/modules/elasticsearch/config_schema.json b/modules/elasticsearch/config_schema.json index f69eb6e43..68449ab87 100644 --- a/modules/elasticsearch/config_schema.json +++ b/modules/elasticsearch/config_schema.json @@ -1,74 +1,274 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/elasticsearch job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "cluster_mode": { - "type": "boolean" - }, - "collect_node_stats": { - "type": "boolean" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Elasticsearch/OpenSearch collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "collect_cluster_health": { - "type": "boolean" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + 
"allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "collect_cluster_stats": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The base URL of the Elasticsearch cluster.", + "type": "string", + "default": "http://127.0.0.1:9200" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 5 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "cluster_mode": { + "title": "Cluster mode", + "description": "Controls whether to collect metrics for all nodes in the Elasticsearch cluster or only for the local node where the collector is running.", + "type": "boolean", + "default": false + }, + "collect_node_stats": { + "title": "Collect node stats", + "description": "Collect metrics about individual nodes in the cluster.", + "type": "boolean", + "default": true + }, + "collect_cluster_health": { + "title": "Collect cluster health", + "description": "Collect metrics about the overall health of the cluster.", + "type": "boolean", + "default": true + }, + "collect_cluster_stats": { + "title": "Collect cluster stats", + "description": "Collect high-level cluster statistics.", + "type": "boolean", + "default": true + }, + "collect_indices_stats": { + "title": "Collect indices stats", + "description": "Collect metrics about individual indices in the cluster.", + "type": "boolean", + "default": false + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + 
"description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "collect_indices_stats": { - "type": "boolean" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/elasticsearch/elasticsearch.go b/modules/elasticsearch/elasticsearch.go index 4b29a6cc8..b452c0187 100644 --- a/modules/elasticsearch/elasticsearch.go +++ b/modules/elasticsearch/elasticsearch.go @@ -4,6 +4,7 @@ package elasticsearch import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -34,7 +35,7 @@ func New() *Elasticsearch { URL: "http://127.0.0.1:9200", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, ClusterMode: false, @@ -54,17 +55,19 @@ func New() *Elasticsearch { } type Config struct { - web.HTTP `yaml:",inline"` - ClusterMode bool `yaml:"cluster_mode"` - DoNodeStats bool `yaml:"collect_node_stats"` - DoClusterHealth bool `yaml:"collect_cluster_health"` - DoClusterStats bool `yaml:"collect_cluster_stats"` - DoIndicesStats bool `yaml:"collect_indices_stats"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` + ClusterMode bool `yaml:"cluster_mode" json:"cluster_mode"` + DoNodeStats bool `yaml:"collect_node_stats" json:"collect_node_stats"` + DoClusterHealth bool `yaml:"collect_cluster_health" json:"collect_cluster_health"` + DoClusterStats bool `yaml:"collect_cluster_stats" json:"collect_cluster_stats"` + DoIndicesStats bool `yaml:"collect_indices_stats" json:"collect_indices_stats"` } type Elasticsearch struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` httpClient *http.Client charts *module.Charts @@ -78,25 +81,38 @@ type Elasticsearch struct { indices map[string]bool } -func (es *Elasticsearch) Init() bool { +func (es *Elasticsearch) Configuration() any { + return es.Config +} + +func (es *Elasticsearch) Init() error { err := es.validateConfig() if err != nil { es.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := es.initHTTPClient() if err != nil { es.Errorf("init HTTP client: %v", err) - return false + return err } es.httpClient = httpClient - return true + return nil } -func (es *Elasticsearch) Check() bool { - 
return len(es.Collect()) > 0 +func (es *Elasticsearch) Check() error { + mx, err := es.collect() + if err != nil { + es.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (es *Elasticsearch) Charts() *module.Charts { diff --git a/modules/elasticsearch/elasticsearch_test.go b/modules/elasticsearch/elasticsearch_test.go index d4f1628cd..1a6d178cc 100644 --- a/modules/elasticsearch/elasticsearch_test.go +++ b/modules/elasticsearch/elasticsearch_test.go @@ -103,9 +103,9 @@ func TestElasticsearch_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) } }) } @@ -128,9 +128,9 @@ func TestElasticsearch_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, es.Check()) + assert.Error(t, es.Check()) } else { - assert.True(t, es.Check()) + assert.NoError(t, es.Check()) } }) } @@ -666,7 +666,7 @@ func prepareElasticsearch(t *testing.T, createES func() *Elasticsearch) (es *Ela es = createES() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -683,7 +683,7 @@ func prepareElasticsearchInvalidData(t *testing.T) (*Elasticsearch, func()) { })) es := New() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -696,7 +696,7 @@ func prepareElasticsearch404(t *testing.T) (*Elasticsearch, func()) { })) es := New() es.URL = srv.URL - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, srv.Close } @@ -705,7 +705,7 @@ func prepareElasticsearchConnectionRefused(t *testing.T) (*Elasticsearch, func() t.Helper() es := New() es.URL = "http://127.0.0.1:38001" - require.True(t, es.Init()) + require.NoError(t, es.Init()) return es, func() {} } diff --git a/modules/energid/README.md b/modules/energid/README.md deleted file mode 120000 index 894468aae..000000000 --- a/modules/energid/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/energi_core_wallet.md \ No newline at end of file diff --git a/modules/energid/charts.go b/modules/energid/charts.go deleted file mode 100644 index 3dcc252af..000000000 --- a/modules/energid/charts.go +++ /dev/null @@ -1,97 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import "github.com/netdata/go.d.plugin/agent/module" - -var charts = module.Charts{ - // getblockchaininfo (blockchain processing) - { - ID: "blockindex", - Title: "Blockchain index", - Units: "count", - Fam: "blockchain", - Ctx: "energid.blockindex", - Type: module.Area, - Dims: module.Dims{ - {ID: "blockchain_blocks", Name: "blocks"}, - {ID: "blockchain_headers", Name: "headers"}, - }, - }, - { - ID: "difficulty", - Title: "Blockchain difficulty", - Units: "difficulty", - Fam: "blockchain", - Ctx: "energid.difficulty", - Dims: module.Dims{ - {ID: "blockchain_difficulty", Name: "difficulty", Div: 1000}, - }, - }, - - // getmempoolinfo (state of the TX memory pool) - { - ID: "mempool", - Title: "Memory pool", - Units: "bytes", - Fam: "memory", - Ctx: "energid.mempool", - Type: module.Area, - Dims: module.Dims{ - {ID: "mempool_max", Name: "max"}, - {ID: "mempool_current", Name: "usage"}, - {ID: "mempool_txsize", Name: "tx_size"}, - }, - }, - - // getmemoryinfo - { - ID: "secmem", - Title: "Secure memory", - Units: "bytes", - Fam: "memory", - Ctx: "energid.secmem", - Type: module.Area, - Dims: module.Dims{ - {ID: "secmem_total", Name: "total"}, - {ID: 
"secmem_used", Name: "used"}, - {ID: "secmem_free", Name: "free"}, - {ID: "secmem_locked", Name: "locked"}, - }, - }, - - // getnetworkinfo (P2P networking) - { - ID: "network", - Title: "Network", - Units: "connections", - Fam: "network", - Ctx: "energid.network", - Dims: module.Dims{ - {ID: "network_connections", Name: "connections"}, - }, - }, - { - ID: "timeoffset", - Title: "Network time offset", - Units: "seconds", - Fam: "network", - Ctx: "energid.timeoffset", - Dims: module.Dims{ - {ID: "network_timeoffset", Name: "timeoffset"}, - }, - }, - - // gettxoutsetinfo (unspent transaction output set) - { - ID: "utxo_transactions", - Title: "Transactions", - Units: "transactions", - Fam: "utxo", - Ctx: "energid.utxo_transactions", - Dims: module.Dims{ - {ID: "utxo_transactions", Name: "transactions"}, - {ID: "utxo_output_transactions", Name: "output_transactions"}, - }, - }, -} diff --git a/modules/energid/collect.go b/modules/energid/collect.go deleted file mode 100644 index 965ee4b36..000000000 --- a/modules/energid/collect.go +++ /dev/null @@ -1,161 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/netdata/go.d.plugin/pkg/stm" - "github.com/netdata/go.d.plugin/pkg/web" -) - -const ( - jsonRPCVersion = "1.1" - - methodGetBlockchainInfo = "getblockchaininfo" - methodGetMemPoolInfo = "getmempoolinfo" - methodGetNetworkInfo = "getnetworkinfo" - methodGetTXOutSetInfo = "gettxoutsetinfo" - methodGetMemoryInfo = "getmemoryinfo" -) - -var infoRequests = rpcRequests{ - {JSONRPC: jsonRPCVersion, ID: 1, Method: methodGetBlockchainInfo}, - {JSONRPC: jsonRPCVersion, ID: 2, Method: methodGetMemPoolInfo}, - {JSONRPC: jsonRPCVersion, ID: 3, Method: methodGetNetworkInfo}, - {JSONRPC: jsonRPCVersion, ID: 4, Method: methodGetTXOutSetInfo}, - {JSONRPC: jsonRPCVersion, ID: 5, Method: methodGetMemoryInfo}, -} - -func (e *Energid) collect() (map[string]int64, error) { - responses, err := e.scrapeEnergid(infoRequests) - if err != nil { - return nil, err - } - - info, err := e.collectInfoResponse(infoRequests, responses) - if err != nil { - return nil, err - } - - return stm.ToMap(info), nil -} - -func (e *Energid) collectInfoResponse(requests rpcRequests, responses rpcResponses) (*energidInfo, error) { - var info energidInfo - for _, req := range requests { - resp := responses.getByID(req.ID) - if resp == nil { - e.Warningf("method '%s' (id %d) not in responses", req.Method, req.ID) - continue - } - - if resp.Error != nil { - e.Warningf("server returned an error on method '%s': %v", req.Method, resp.Error) - continue - } - - var err error - switch req.Method { - case methodGetBlockchainInfo: - info.Blockchain, err = parseBlockchainInfo(resp.Result) - case methodGetMemPoolInfo: - info.MemPool, err = parseMemPoolInfo(resp.Result) - case methodGetNetworkInfo: - info.Network, err = parseNetworkInfo(resp.Result) - case methodGetTXOutSetInfo: - info.TxOutSet, err = parseTXOutSetInfo(resp.Result) - case methodGetMemoryInfo: - info.Memory, err = parseMemoryInfo(resp.Result) - } - if err != nil { - return nil, fmt.Errorf("parse '%s' method result: %v", req.Method, err) - } - } - - return &info, nil -} - -func parseBlockchainInfo(result []byte) (*blockchainInfo, error) { - var m blockchainInfo - if err := json.Unmarshal(result, &m); err != nil { - return nil, err - } - return &m, nil -} - -func parseMemPoolInfo(result []byte) (*memPoolInfo, error) { - var m memPoolInfo - if err := json.Unmarshal(result, &m); err != 
nil { - return nil, err - } - return &m, nil -} - -func parseNetworkInfo(result []byte) (*networkInfo, error) { - var m networkInfo - if err := json.Unmarshal(result, &m); err != nil { - return nil, err - } - return &m, nil -} - -func parseTXOutSetInfo(result []byte) (*txOutSetInfo, error) { - var m txOutSetInfo - if err := json.Unmarshal(result, &m); err != nil { - return nil, err - } - return &m, nil -} - -func parseMemoryInfo(result []byte) (*memoryInfo, error) { - var m memoryInfo - if err := json.Unmarshal(result, &m); err != nil { - return nil, err - } - return &m, nil -} - -func (e *Energid) scrapeEnergid(requests rpcRequests) (rpcResponses, error) { - req, _ := web.NewHTTPRequest(e.Request) - req.Method = http.MethodPost - req.Header.Set("Content-Type", "application/json") - body, _ := json.Marshal(requests) - req.Body = io.NopCloser(bytes.NewReader(body)) - - var resp rpcResponses - if err := e.doOKDecode(req, &resp); err != nil { - return nil, err - } - - return resp, nil -} - -func (e *Energid) doOKDecode(req *http.Request, in interface{}) error { - resp, err := e.httpClient.Do(req) - if err != nil { - return fmt.Errorf("error on HTTP request '%s': %v", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("'%s' returned HTTP status code: %d", req.URL, resp.StatusCode) - } - - if err := json.NewDecoder(resp.Body).Decode(in); err != nil { - return fmt.Errorf("error on decoding response from '%s': %v", req.URL, err) - } - - return nil -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/modules/energid/config_schema.json b/modules/energid/config_schema.json deleted file mode 100644 index 20f4ec9f8..000000000 --- a/modules/energid/config_schema.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/energid job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } - }, - "required": [ - "name", - "url" - ] -} diff --git a/modules/energid/energid.go b/modules/energid/energid.go deleted file mode 100644 index fcffe50d8..000000000 --- a/modules/energid/energid.go +++ /dev/null @@ -1,104 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import ( - _ "embed" - "net/http" - "time" - - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/pkg/web" -) - -//go:embed "config_schema.json" -var configSchema string - -func init() { - module.Register("energid", module.Creator{ - JobConfigSchema: configSchema, - Defaults: module.Defaults{ - UpdateEvery: 5, - }, - Create: func() module.Module { return New() }, - }) -} - -type Config struct { - web.HTTP `yaml:",inline"` -} - -type Energid struct { - module.Base - Config `yaml:",inline"` - - httpClient *http.Client - charts 
*module.Charts -} - -func New() *Energid { - return &Energid{ - Config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:9796", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, - }, - }, - }, - } -} - -func (e *Energid) Init() bool { - err := e.validateConfig() - if err != nil { - e.Errorf("config validation: %v", err) - return false - } - - client, err := e.initHTTPClient() - if err != nil { - e.Errorf("init HTTP client: %v", err) - return false - } - e.httpClient = client - - cs, err := e.initCharts() - if err != nil { - e.Errorf("init charts: %v", err) - return false - } - e.charts = cs - - return true -} - -func (e *Energid) Check() bool { - return len(e.Collect()) > 0 -} - -func (e *Energid) Charts() *module.Charts { - return e.charts -} - -func (e *Energid) Collect() map[string]int64 { - ms, err := e.collect() - if err != nil { - e.Error(err) - } - - if len(ms) == 0 { - return nil - } - - return ms -} - -func (e *Energid) Cleanup() { - if e.httpClient == nil { - return - } - e.httpClient.CloseIdleConnections() -} diff --git a/modules/energid/energid_test.go b/modules/energid/energid_test.go deleted file mode 100644 index ab0e2f24e..000000000 --- a/modules/energid/energid_test.go +++ /dev/null @@ -1,285 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import ( - "encoding/json" - "io" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/netdata/go.d.plugin/pkg/tlscfg" - "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - v241GetBlockchainInfo, _ = os.ReadFile("testdata/v2.4.1/getblockchaininfo.json") - v241GetMemPoolInfo, _ = os.ReadFile("testdata/v2.4.1/getmempoolinfo.json") - v241GetNetworkInfo, _ = os.ReadFile("testdata/v2.4.1/getnetworkinfo.json") - v241GetTXOutSetInfo, _ = os.ReadFile("testdata/v2.4.1/gettxoutsetinfo.json") - v241GetMemoryInfo, _ = os.ReadFile("testdata/v2.4.1/getmemoryinfo.json") -) - -func Test_Testdata(t *testing.T) { - for name, data := range map[string][]byte{ - "v241GetBlockchainInfo": v241GetBlockchainInfo, - "v241GetMemPoolInfo": v241GetMemPoolInfo, - "v241GetNetworkInfo": v241GetNetworkInfo, - "v241GetTXOutSetInfo": v241GetTXOutSetInfo, - "v241GetMemoryInfo": v241GetMemoryInfo, - } { - require.NotNilf(t, data, name) - } -} - -func TestNew(t *testing.T) { - assert.IsType(t, (*Energid)(nil), New()) -} - -func Test_Init(t *testing.T) { - tests := map[string]struct { - config Config - wantFail bool - }{ - "success on default config": { - config: New().Config, - }, - "fails on unset URL": { - wantFail: true, - config: Config{ - HTTP: web.HTTP{ - Request: web.Request{URL: ""}, - }, - }, - }, - "fails on invalid TLSCA": { - wantFail: true, - config: Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:38001", - }, - Client: web.Client{ - TLSConfig: tlscfg.TLSConfig{TLSCA: "testdata/tls"}, - }, - }, - }, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - energid := New() - energid.Config = test.config - - if test.wantFail { - assert.False(t, energid.Init()) - } else { - assert.True(t, energid.Init()) - } - }) - } -} - -func Test_Charts(t *testing.T) { - energid := New() - require.True(t, energid.Init()) - assert.NotNil(t, energid.Charts()) -} - -func Test_Cleanup(t *testing.T) { - assert.NotPanics(t, New().Cleanup) -} - -func Test_Check(t *testing.T) { - tests := map[string]struct { - prepare func() (energid *Energid, cleanup func()) 
- wantFail bool - }{ - "success on valid v2.4.1 response": { - prepare: prepareEnergidV241, - }, - "fails on 404 response": { - wantFail: true, - prepare: prepareEnergid404, - }, - "fails on connection refused": { - wantFail: true, - prepare: prepareEnergidConnectionRefused, - }, - "fails on response with invalid data": { - wantFail: true, - prepare: prepareEnergidInvalidData, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - energid, cleanup := test.prepare() - defer cleanup() - - require.True(t, energid.Init()) - - if test.wantFail { - assert.False(t, energid.Check()) - } else { - assert.True(t, energid.Check()) - } - }) - } -} - -func Test_Collect(t *testing.T) { - tests := map[string]struct { - prepare func() (energid *Energid, cleanup func()) - wantCollected map[string]int64 - }{ - "success on valid v2.4.1 response": { - prepare: prepareEnergidV241, - wantCollected: map[string]int64{ - "blockchain_blocks": 1, - "blockchain_difficulty": 0, - "blockchain_headers": 1, - "mempool_current": 1, - "mempool_max": 300000000, - "mempool_txsize": 1, - "network_connections": 1, - "network_timeoffset": 1, - "secmem_free": 65248, - "secmem_locked": 65536, - "secmem_total": 65536, - "secmem_used": 288, - "utxo_output_transactions": 1, - "utxo_transactions": 1, - }, - }, - "fails on 404 response": { - prepare: prepareEnergid404, - }, - "fails on connection refused": { - prepare: prepareEnergidConnectionRefused, - }, - "fails on response with invalid data": { - prepare: prepareEnergidInvalidData, - }, - } - - for name, test := range tests { - t.Run(name, func(t *testing.T) { - energid, cleanup := test.prepare() - defer cleanup() - require.True(t, energid.Init()) - - collected := energid.Collect() - - assert.Equal(t, test.wantCollected, collected) - if len(test.wantCollected) > 0 { - ensureCollectedHasAllChartsDimsVarsIDs(t, energid, collected) - } - }) - } -} - -func ensureCollectedHasAllChartsDimsVarsIDs(t *testing.T, energid *Energid, ms map[string]int64) { - for _, chart := range *energid.Charts() { - if chart.Obsolete { - continue - } - for _, dim := range chart.Dims { - _, ok := ms[dim.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", dim.ID, chart.ID) - } - for _, v := range chart.Vars { - _, ok := ms[v.ID] - assert.Truef(t, ok, "chart '%s' dim '%s': no dim in collected", v.ID, chart.ID) - } - } -} - -func prepareEnergidV241() (*Energid, func()) { - srv := prepareEnergidEndPoint() - energid := New() - energid.URL = srv.URL - - return energid, srv.Close -} - -func prepareEnergidInvalidData() (*Energid, func()) { - srv := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - _, _ = w.Write([]byte("Hello world!")) - })) - energid := New() - energid.URL = srv.URL - - return energid, srv.Close -} - -func prepareEnergid404() (*Energid, func()) { - srv := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusNotFound) - })) - energid := New() - energid.URL = srv.URL - - return energid, srv.Close -} - -func prepareEnergidConnectionRefused() (*Energid, func()) { - energid := New() - energid.URL = "http://127.0.0.1:38001" - - return energid, func() {} -} - -func prepareEnergidEndPoint() *httptest.Server { - return httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - body, _ := io.ReadAll(r.Body) - var requests rpcRequests - if err := 
json.Unmarshal(body, &requests); err != nil || len(requests) == 0 { - w.WriteHeader(http.StatusInternalServerError) - return - } - - var responses rpcResponses - for _, req := range requests { - resp := rpcResponse{JSONRPC: jsonRPCVersion, ID: req.ID} - switch req.Method { - case methodGetBlockchainInfo: - resp.Result = prepareResult(v241GetBlockchainInfo) - case methodGetMemPoolInfo: - resp.Result = prepareResult(v241GetMemPoolInfo) - case methodGetNetworkInfo: - resp.Result = prepareResult(v241GetNetworkInfo) - case methodGetTXOutSetInfo: - resp.Result = prepareResult(v241GetTXOutSetInfo) - case methodGetMemoryInfo: - resp.Result = prepareResult(v241GetMemoryInfo) - default: - resp.Error = &rpcError{Code: -32601, Message: "Method not found"} - } - responses = append(responses, resp) - } - - bs, _ := json.Marshal(responses) - _, _ = w.Write(bs) - })) -} - -func prepareResult(resp []byte) json.RawMessage { - var r rpcResponse - _ = json.Unmarshal(resp, &r) - return r.Result -} diff --git a/modules/energid/init.go b/modules/energid/init.go deleted file mode 100644 index 3b7b7fb9e..000000000 --- a/modules/energid/init.go +++ /dev/null @@ -1,31 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import ( - "errors" - "net/http" - - "github.com/netdata/go.d.plugin/agent/module" - "github.com/netdata/go.d.plugin/pkg/web" -) - -func (e Energid) validateConfig() error { - if e.URL == "" { - return errors.New("URL not set") - } - - if _, err := web.NewHTTPRequest(e.Request); err != nil { - return err - } - - return nil -} - -func (e Energid) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(e.Client) -} - -func (e Energid) initCharts() (*module.Charts, error) { - return charts.Copy(), nil -} diff --git a/modules/energid/integrations/energi_core_wallet.md b/modules/energid/integrations/energi_core_wallet.md deleted file mode 100644 index 1215bed15..000000000 --- a/modules/energid/integrations/energi_core_wallet.md +++ /dev/null @@ -1,224 +0,0 @@ - - -# Energi Core Wallet - - - - - -Plugin: go.d.plugin -Module: apache - - - -## Overview - -This module monitors Energi Core Wallet instances. -Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet). - - - - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -This integration doesn't support auto-detection. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Energi Core Wallet instance - -These metrics refer to the entire monitored application. - -This scope has no labels. 
- -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| energid.blockindex | blocks, headers | count | -| energid.difficulty | difficulty | difficulty | -| energid.mempool | max, usage, tx_size | bytes | -| energid.secmem | total, used, free, locked | bytes | -| energid.network | connections | connections | -| energid.timeoffset | timeoffset | seconds | -| energid.utxo_transactions | transactions, output_transactions | transactions | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -No action required. - -### Configuration - -#### File - -The configuration file name for this integration is `go.d/energid.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config go.d/energid.conf -``` -#### Options - -The following options can be defined globally: update_every, autodetection_retry. - - -
Config options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Data collection frequency. | 1 | no | -| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | -| url | Server URL. | http://127.0.0.1:9796 | yes | -| timeout | HTTP request timeout. | 1 | no | -| username | Username for basic HTTP authentication. | | no | -| password | Password for basic HTTP authentication. | | no | -| proxy_url | Proxy URL. | | no | -| proxy_username | Username for proxy basic HTTP authentication. | | no | -| proxy_password | Password for proxy basic HTTP authentication. | | no | -| method | HTTP request method. | GET | no | -| body | HTTP request body. | | no | -| headers | HTTP request headers. | | no | -| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | -| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | -| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | -| tls_cert | Client TLS certificate. | | no | -| tls_key | Client TLS key. | | no | - -
- -#### Examples - -##### Basic - -A basic example configuration. - -```yaml -jobs: - - name: local - url: http://127.0.0.1:9796 - -``` -##### HTTP authentication - -Basic HTTP authentication. - -
Config - -```yaml -jobs: - - name: local - url: http://127.0.0.1:9796 - username: username - password: password - -``` -
- -##### HTTPS with self-signed certificate - -Do not validate server certificate chain and hostname. - - -
Config - -```yaml -jobs: - - name: local - url: https://127.0.0.1:9796 - tls_skip_verify: yes - -``` -
- -##### Multi-instance - -> **Note**: When you define multiple jobs, their names must be unique. - -Collecting metrics from local and remote instances. - - -
Config - -```yaml -jobs: - - name: local - url: http://127.0.0.1:9796 - - - name: remote - url: http://192.0.2.1:9796 - -``` -
- - - -## Troubleshooting - -### Debug Mode - -To troubleshoot issues with the `apache` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m apache - ``` - - diff --git a/modules/energid/jsonrpc.go b/modules/energid/jsonrpc.go deleted file mode 100644 index c3a80e9b0..000000000 --- a/modules/energid/jsonrpc.go +++ /dev/null @@ -1,48 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -import ( - "encoding/json" - "fmt" -) - -// https://www.jsonrpc.org/specification#request_object -type ( - rpcRequest struct { - JSONRPC string `json:"jsonrpc"` - Method string `json:"method"` - ID int `json:"id"` - } - rpcRequests []rpcRequest -) - -// http://www.jsonrpc.org/specification#response_object -type ( - rpcResponse struct { - JSONRPC string `json:"jsonrpc"` - Result json.RawMessage `json:"result"` - Error *rpcError `json:"error"` - ID int `json:"id"` - } - rpcResponses []rpcResponse -) - -func (rs rpcResponses) getByID(id int) *rpcResponse { - for _, r := range rs { - if r.ID == id { - return &r - } - } - return nil -} - -// http://www.jsonrpc.org/specification#error_object -type rpcError struct { - Code int64 `json:"code"` - Message string `json:"message"` -} - -func (e rpcError) String() string { - return fmt.Sprintf("%s (code %d)", e.Message, e.Code) -} diff --git a/modules/energid/metadata.yaml b/modules/energid/metadata.yaml deleted file mode 100644 index c32f7cb57..000000000 --- a/modules/energid/metadata.yaml +++ /dev/null @@ -1,225 +0,0 @@ -plugin_name: go.d.plugin -modules: - - meta: - id: collector-go.d.plugin-energid - module_name: apache - plugin_name: energid - monitored_instance: - name: Energi Core Wallet - link: "" - icon_filename: energi.png - categories: - - data-collection.blockchain-servers - keywords: - - energid - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - most_popular: true - overview: - data_collection: - metrics_description: | - This module monitors Energi Core Wallet instances. - Works only with [Generation 2 wallets](https://docs.energi.software/en/downloads/gen2-core-wallet). - method_description: "" - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "" - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: [] - configuration: - file: - name: go.d/energid.conf - options: - description: | - The following options can be defined globally: update_every, autodetection_retry. - folding: - title: Config options - enabled: true - list: - - name: update_every - description: Data collection frequency. - default_value: 1 - required: false - - name: autodetection_retry - description: Recheck interval in seconds. Zero means no recheck will be scheduled. - default_value: 0 - required: false - - name: url - description: Server URL. 
- default_value: http://127.0.0.1:9796 - required: true - - name: timeout - description: HTTP request timeout. - default_value: 1 - required: false - - name: username - description: Username for basic HTTP authentication. - default_value: "" - required: false - - name: password - description: Password for basic HTTP authentication. - default_value: "" - required: false - - name: proxy_url - description: Proxy URL. - default_value: "" - required: false - - name: proxy_username - description: Username for proxy basic HTTP authentication. - default_value: "" - required: false - - name: proxy_password - description: Password for proxy basic HTTP authentication. - default_value: "" - required: false - - name: method - description: HTTP request method. - default_value: GET - required: false - - name: body - description: HTTP request body. - default_value: "" - required: false - - name: headers - description: HTTP request headers. - default_value: "" - required: false - - name: not_follow_redirects - description: Redirect handling policy. Controls whether the client follows redirects. - default_value: no - required: false - - name: tls_skip_verify - description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. - default_value: no - required: false - - name: tls_ca - description: Certification authority that the client uses when verifying the server's certificates. - default_value: "" - required: false - - name: tls_cert - description: Client TLS certificate. - default_value: "" - required: false - - name: tls_key - description: Client TLS key. - default_value: "" - required: false - examples: - folding: - title: Config - enabled: true - list: - - name: Basic - folding: - enabled: false - description: A basic example configuration. - config: | - jobs: - - name: local - url: http://127.0.0.1:9796 - - name: HTTP authentication - description: Basic HTTP authentication. - config: | - jobs: - - name: local - url: http://127.0.0.1:9796 - username: username - password: password - - name: HTTPS with self-signed certificate - description: | - Do not validate server certificate chain and hostname. - config: | - jobs: - - name: local - url: https://127.0.0.1:9796 - tls_skip_verify: yes - - name: Multi-instance - description: | - > **Note**: When you define multiple jobs, their names must be unique. - - Collecting metrics from local and remote instances. - config: | - jobs: - - name: local - url: http://127.0.0.1:9796 - - - name: remote - url: http://192.0.2.1:9796 - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: These metrics refer to the entire monitored application. 
- labels: [] - metrics: - - name: energid.blockindex - description: Blockchain index - unit: count - chart_type: area - dimensions: - - name: blocks - - name: headers - - name: energid.difficulty - description: Blockchain difficulty - unit: difficulty - chart_type: line - dimensions: - - name: difficulty - - name: energid.mempool - description: Memory pool - unit: bytes - chart_type: area - dimensions: - - name: max - - name: usage - - name: tx_size - - name: energid.secmem - description: Secure memory - unit: bytes - chart_type: area - dimensions: - - name: total - - name: used - - name: free - - name: locked - - name: energid.network - description: Network - unit: connections - chart_type: line - dimensions: - - name: connections - - name: energid.timeoffset - description: Network time offset - unit: seconds - chart_type: line - dimensions: - - name: timeoffset - - name: energid.utxo_transactions - description: Transactions - unit: transactions - chart_type: line - dimensions: - - name: transactions - - name: output_transactions diff --git a/modules/energid/metrics.go b/modules/energid/metrics.go deleted file mode 100644 index 2e77edf91..000000000 --- a/modules/energid/metrics.go +++ /dev/null @@ -1,49 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package energid - -// API docs: https://github.com/energicryptocurrency/core-api-documentation - -type energidInfo struct { - Blockchain *blockchainInfo `stm:"blockchain"` - MemPool *memPoolInfo `stm:"mempool"` - Network *networkInfo `stm:"network"` - TxOutSet *txOutSetInfo `stm:"utxo"` - Memory *memoryInfo `stm:"secmem"` -} - -// https://github.com/energicryptocurrency/core-api-documentation#getblockchaininfo -type blockchainInfo struct { - Blocks float64 `stm:"blocks" json:"blocks"` - Headers float64 `stm:"headers" json:"headers"` - Difficulty float64 `stm:"difficulty,1000,1" json:"difficulty"` -} - -// https://github.com/energicryptocurrency/core-api-documentation#getmempoolinfo -type memPoolInfo struct { - Bytes float64 `stm:"txsize" json:"bytes"` - Usage float64 `stm:"current" json:"usage"` - MaxMemPool float64 `stm:"max" json:"maxmempool"` -} - -// https://github.com/energicryptocurrency/core-api-documentation#getnetworkinfo -type networkInfo struct { - TimeOffset float64 `stm:"timeoffset" json:"timeoffset"` - Connections float64 `stm:"connections" json:"connections"` -} - -// https://github.com/energicryptocurrency/core-api-documentation#gettxoutsetinfo -type txOutSetInfo struct { - Transactions float64 `stm:"transactions" json:"transactions"` - TxOuts float64 `stm:"output_transactions" json:"txouts"` -} - -// undocumented -type memoryInfo struct { - Locked struct { - Used float64 `stm:"used" json:"used"` - Free float64 `stm:"free" json:"free"` - Total float64 `stm:"total" json:"total"` - Locked float64 `stm:"locked" json:"locked"` - } `stm:"" json:"locked"` -} diff --git a/modules/energid/testdata/v2.4.1/getblockchaininfo.json b/modules/energid/testdata/v2.4.1/getblockchaininfo.json deleted file mode 100644 index 7d194d62a..000000000 --- a/modules/energid/testdata/v2.4.1/getblockchaininfo.json +++ /dev/null @@ -1,66 +0,0 @@ -{ - "result": { - "chain": "test", - "blocks": 1, - "headers": 1, - "bestblockhash": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74", - "difficulty": 4.656542373906925e-10, - "mediantime": 1524344801, - "verificationprogress": 3.57591520058473e-07, - "chainwork": "0000000000000000000000000000000000000000000000000000000000000002", - "pruned": false, - "pos": false, - "posv2": false, - 
"softforks": [ - { - "id": "bip34", - "version": 2, - "reject": { - "status": false - } - }, - { - "id": "bip66", - "version": 3, - "reject": { - "status": false - } - }, - { - "id": "bip65", - "version": 4, - "reject": { - "status": false - } - } - ], - "bip9_softforks": { - "csv": { - "status": "defined", - "startTime": 1486252800, - "timeout": 1549328400, - "since": 1 - }, - "dip0001": { - "status": "defined", - "startTime": 1505692800, - "timeout": 1549328400, - "since": 1 - }, - "bip147": { - "status": "defined", - "startTime": 1546300800, - "timeout": 1549328400, - "since": 1 - }, - "spork17": { - "status": "defined", - "startTime": 1566129600, - "timeout": 1577793600, - "since": 1 - } - } - }, - "error": null, - "id": "1" -} diff --git a/modules/energid/testdata/v2.4.1/getmemoryinfo.json b/modules/energid/testdata/v2.4.1/getmemoryinfo.json deleted file mode 100644 index 9fdece550..000000000 --- a/modules/energid/testdata/v2.4.1/getmemoryinfo.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "result": { - "locked": { - "used": 288, - "free": 65248, - "total": 65536, - "locked": 65536, - "chunks_used": 4, - "chunks_free": 2 - } - }, - "error": null, - "id": "1" -} diff --git a/modules/energid/testdata/v2.4.1/getmempoolinfo.json b/modules/energid/testdata/v2.4.1/getmempoolinfo.json deleted file mode 100644 index 8845555b1..000000000 --- a/modules/energid/testdata/v2.4.1/getmempoolinfo.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "result": { - "size": 1, - "bytes": 1, - "usage": 1, - "maxmempool": 300000000, - "mempoolminfee": 1 - }, - "error": null, - "id": "1" -} diff --git a/modules/energid/testdata/v2.4.1/getnetworkinfo.json b/modules/energid/testdata/v2.4.1/getnetworkinfo.json deleted file mode 100644 index 59df2c5ad..000000000 --- a/modules/energid/testdata/v2.4.1/getnetworkinfo.json +++ /dev/null @@ -1,41 +0,0 @@ -{ - "result": { - "version": 2040100, - "subversion": "/Energi Core:2.4.1/", - "protocolversion": 70213, - "localservices": "0000000000000005", - "localrelay": true, - "timeoffset": 1, - "networkactive": true, - "connections": 1, - "networks": [ - { - "name": "ipv4", - "limited": false, - "reachable": true, - "proxy": "", - "proxy_randomize_credentials": false - }, - { - "name": "ipv6", - "limited": false, - "reachable": true, - "proxy": "", - "proxy_randomize_credentials": false - }, - { - "name": "onion", - "limited": true, - "reachable": false, - "proxy": "", - "proxy_randomize_credentials": false - } - ], - "relayfee": 1e-05, - "incrementalfee": 1e-05, - "localaddresses": [], - "warnings": "" - }, - "error": null, - "id": "1" -} diff --git a/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json b/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json deleted file mode 100644 index 5bc606f57..000000000 --- a/modules/energid/testdata/v2.4.1/gettxoutsetinfo.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "result": { - "height": 1, - "bestblock": "ee84bfa5f6cafe2ba7f164cee0c33ec63aca76edffa4e8e94656a9be2262cf74", - "transactions": 1, - "txouts": 1, - "hash_serialized_2": "ba3631e5919f37c8f542658238de0516612a7063fbd6143ef813a4e1cc4548e1", - "disk_size": 1, - "total_amount": 1 - }, - "error": null, - "id": "1" -} diff --git a/modules/envoy/config_schema.json b/modules/envoy/config_schema.json index 48b3c9478..0fb609d2e 100644 --- a/modules/envoy/config_schema.json +++ b/modules/envoy/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/envoy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": 
"string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Envoy collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Envoy Prometheus endpoint.", + "type": "string", + "default": "http://127.0.0.1:9091/stats/prometheus" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + 
"title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/envoy/envoy.go b/modules/envoy/envoy.go index de9efa13d..2415cad2a 100644 --- a/modules/envoy/envoy.go +++ b/modules/envoy/envoy.go @@ -4,6 +4,7 @@ package envoy import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Envoy { URL: "http://127.0.0.1:9091/stats/prometheus", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, }, }, @@ -46,12 +47,14 @@ func New() *Envoy { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Envoy struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` prom prometheus.Prometheus @@ -65,24 +68,37 @@ type Envoy struct { listenerDownstream map[string]bool } -func (e *Envoy) Init() bool { +func (e *Envoy) Configuration() any { + return e.Config +} + +func (e *Envoy) Init() error { if err := e.validateConfig(); err != nil { e.Errorf("config validation: %v", err) - return false + return err } prom, err := e.initPrometheusClient() if err != nil { e.Errorf("init Prometheus client: %v", err) - return false + return err } e.prom = prom - return true + return nil } -func (e *Envoy) Check() bool { - return len(e.Collect()) > 0 +func (e *Envoy) Check() error { + mx, err := e.collect() + if err != nil { + e.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (e *Envoy) Charts() *module.Charts { diff --git a/modules/envoy/envoy_test.go b/modules/envoy/envoy_test.go index 3bdd82cb1..7bf5325ac 100644 --- a/modules/envoy/envoy_test.go +++ b/modules/envoy/envoy_test.go @@ -53,9 +53,9 @@ func TestEnvoy_Init(t *testing.T) { envoy.Config = test.config if test.wantFail { - assert.False(t, envoy.Init()) + assert.Error(t, envoy.Init()) } 
else { - assert.True(t, envoy.Init()) + assert.NoError(t, envoy.Init()) } }) } @@ -66,7 +66,7 @@ func TestEnvoy_Cleanup(t *testing.T) { envoy := New() assert.NotPanics(t, envoy.Cleanup) - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) assert.NotPanics(t, envoy.Cleanup) } @@ -76,7 +76,7 @@ func TestEnvoy_Charts(t *testing.T) { require.Empty(t, *envoy.Charts()) - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) _ = envoy.Collect() require.NotEmpty(t, *envoy.Charts()) } @@ -109,12 +109,12 @@ func TestEnvoy_Check(t *testing.T) { envoy, cleanup := test.prepare() defer cleanup() - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) if test.wantFail { - assert.False(t, envoy.Check()) + assert.Error(t, envoy.Check()) } else { - assert.True(t, envoy.Check()) + assert.NoError(t, envoy.Check()) } }) } @@ -489,7 +489,7 @@ func TestEnvoy_Collect(t *testing.T) { envoy, cleanup := test.prepare() defer cleanup() - require.True(t, envoy.Init()) + require.NoError(t, envoy.Init()) mx := envoy.Collect() diff --git a/modules/example/config_schema.json b/modules/example/config_schema.json index 852b39b1c..13299d0ed 100644 --- a/modules/example/config_schema.json +++ b/modules/example/config_schema.json @@ -1,68 +1,65 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/example job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "charts": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "num": { - "type": "integer" - }, - "contexts": { - "type": "integer" - }, - "dimensions": { - "type": "integer" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/example job configuration schema.", + "type": "object", + "properties": { + "charts": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "num": { + "type": "integer", + "minimum": 1, + "default": 1 + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer", + "minimum": 1, + "default": 2 + }, + "labels": { + "type": "integer" + } }, - "labels": { - "type": "integer" - } + "required": [ + "num", + "dimensions" + ] }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - }, - "hidden_charts": { - "type": "object", - "properties": { - "type": { - "type": "string" - }, - "num": { - "type": "integer" - }, - "contexts": { - "type": "integer" - }, - "dimensions": { - "type": "integer" - }, - "labels": { - "type": "integer" + "hidden_charts": { + "type": "object", + "properties": { + "type": { + "type": "string" + }, + "num": { + "type": "integer" + }, + "contexts": { + "type": "integer" + }, + "dimensions": { + "type": "integer" + }, + "labels": { + "type": "integer" + } } - }, - "required": [ - "type", - "num", - "contexts", - "dimensions", - "labels" - ] - } + } + }, + "required": [ + "charts" + ] }, - "required": [ - "name", - "charts" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/example/example.go b/modules/example/example.go index fe24bcc3e..8105da6d9 100644 --- a/modules/example/example.go +++ b/modules/example/example.go @@ -4,6 +4,7 @@ package example import ( _ "embed" + "errors" "math/rand" "github.com/netdata/go.d.plugin/agent/module" @@ -17,7 +18,7 @@ func init() { JobConfigSchema: configSchema, Defaults: module.Defaults{ UpdateEvery: module.UpdateEvery, - AutoDetectionRetry: module.AutoDetectionRetry, + AutoDetectionRetry: 5, Priority: module.Priority, 
Disabled: true, }, @@ -45,15 +46,15 @@ func New() *Example { type ( Config struct { - Charts ConfigCharts `yaml:"charts"` - HiddenCharts ConfigCharts `yaml:"hidden_charts"` + Charts ConfigCharts `yaml:"charts" json:"charts"` + HiddenCharts ConfigCharts `yaml:"hidden_charts" json:"hidden_charts"` } ConfigCharts struct { - Type string `yaml:"type"` - Num int `yaml:"num"` - Contexts int `yaml:"contexts"` - Dims int `yaml:"dimensions"` - Labels int `yaml:"labels"` + Type string `yaml:"type" json:"type"` + Num int `yaml:"num" json:"num"` + Contexts int `yaml:"contexts" json:"contexts"` + Dims int `yaml:"dimensions" json:"dimensions"` + Labels int `yaml:"labels" json:"labels"` } ) @@ -66,24 +67,40 @@ type Example struct { collectedDims map[string]bool } -func (e *Example) Init() bool { +func (e *Example) Configuration() any { + return e.Config +} + +func (e *Example) Init() error { err := e.validateConfig() if err != nil { e.Errorf("config validation: %v", err) - return false + return err } charts, err := e.initCharts() if err != nil { e.Errorf("charts init: %v", err) - return false + return err } e.charts = charts - return true + return nil } -func (e *Example) Check() bool { - return len(e.Collect()) > 0 +func (e *Example) Check() error { + if e.Config.Charts.Dims == 5 { + return errors.New("guess what, 5 dimensions are not allowed") + } + mx, err := e.collect() + if err != nil { + e.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (e *Example) Charts() *module.Charts { diff --git a/modules/example/example_test.go b/modules/example/example_test.go index 47cc51a2f..36181727d 100644 --- a/modules/example/example_test.go +++ b/modules/example/example_test.go @@ -96,9 +96,9 @@ func TestExample_Init(t *testing.T) { example.Config = test.config if test.wantFail { - assert.False(t, example.Init()) + assert.Error(t, example.Init()) } else { - assert.True(t, example.Init()) + assert.NoError(t, example.Init()) } }) } @@ -124,12 +124,12 @@ func TestExample_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { example := test.prepare() - require.True(t, example.Init()) + require.NoError(t, example.Init()) if test.wantFail { - assert.False(t, example.Check()) + assert.Error(t, example.Check()) } else { - assert.True(t, example.Check()) + assert.NoError(t, example.Check()) } }) } @@ -153,7 +153,7 @@ func TestExample_Charts(t *testing.T) { "initialized collector": { prepare: func(t *testing.T) *Example { example := New() - require.True(t, example.Init()) + require.NoError(t, example.Init()) return example }, }, @@ -259,7 +259,7 @@ func TestExample_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { example := test.prepare() - require.True(t, example.Init()) + require.NoError(t, example.Init()) collected := example.Collect() diff --git a/modules/filecheck/collect_dirs.go b/modules/filecheck/collect_dirs.go index 32861c0e0..622cbf76a 100644 --- a/modules/filecheck/collect_dirs.go +++ b/modules/filecheck/collect_dirs.go @@ -14,7 +14,7 @@ import ( func (fc *Filecheck) collectDirs(ms map[string]int64) { curTime := time.Now() - if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration { + if time.Since(fc.lastDiscoveryDirs) >= fc.DiscoveryEvery.Duration() { fc.lastDiscoveryDirs = curTime fc.curDirs = fc.discoveryDirs() fc.updateDirsCharts(fc.curDirs) @@ -54,7 +54,7 @@ func (fc *Filecheck) collectDir(ms map[string]int64, path string, curTime time.T } } -func (fc Filecheck)
discoveryDirs() (dirs []string) { +func (fc *Filecheck) discoveryDirs() (dirs []string) { for _, path := range fc.Dirs.Include { if hasMeta(path) { continue diff --git a/modules/filecheck/collect_files.go b/modules/filecheck/collect_files.go index 25568473f..a3dd93ef8 100644 --- a/modules/filecheck/collect_files.go +++ b/modules/filecheck/collect_files.go @@ -14,7 +14,7 @@ import ( func (fc *Filecheck) collectFiles(ms map[string]int64) { curTime := time.Now() - if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration { + if time.Since(fc.lastDiscoveryFiles) >= fc.DiscoveryEvery.Duration() { fc.lastDiscoveryFiles = curTime fc.curFiles = fc.discoveryFiles() fc.updateFilesCharts(fc.curFiles) @@ -47,7 +47,7 @@ func (fc *Filecheck) collectFile(ms map[string]int64, path string, curTime time. ms[fileDimID(path, "mtime_ago")] = int64(curTime.Sub(info.ModTime()).Seconds()) } -func (fc Filecheck) discoveryFiles() (files []string) { +func (fc *Filecheck) discoveryFiles() (files []string) { for _, path := range fc.Files.Include { if hasMeta(path) { continue diff --git a/modules/filecheck/config_schema.json b/modules/filecheck/config_schema.json index a6b0efca9..fd1344235 100644 --- a/modules/filecheck/config_schema.json +++ b/modules/filecheck/config_schema.json @@ -1,75 +1,87 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/filecheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "discovery_every": { - "type": [ - "string", - "integer" - ] - }, - "files": { - "type": "object", - "properties": { - "include": { - "type": "array", - "items": { - "type": "string" - } - }, - "exclude": { - "type": "array", - "items": { - "type": "string" - } - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Filecheck collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" }, - "required": [ - "include", - "exclude" - ] - }, - "dirs": { - "type": "object", - "properties": { - "include": { - "type": "array", - "items": { - "type": "string" + "files": { + "title": "File selector", + "description": "Files matching the selector will be monitored. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 or exclude2). Patterns follow the syntax of shell file name patterns.", + "type": "object", + "properties": { + "include": { + "title": "Include", + "description": "Include files that match any of the specified include patterns.", + "type": "array", + "items": { + "title": "Filepath", + "type": "string" + }, + "uniqueItems": true + }, + "exclude": { + "title": "Exclude", + "description": "Exclude files that match any of the specified exclude patterns.", + "type": "array", + "items": { + "title": "Filepath", + "type": "string" + }, + "uniqueItems": true } }, - "exclude": { - "type": "array", - "items": { - "type": "string" + "required": [ + "include" + ] + }, + "dirs": { + "title": "Directory selector", + "description": "Directories matching the selector will be monitored. The logic for inclusion and exclusion is as follows: (include1 OR include2) AND !(exclude1 or exclude2). 
Patterns follow the syntax of shell file name patterns.", + "type": "object", + "properties": { + "include": { + "title": "Include", + "description": "Include directories that match any of the specified include patterns.", + "type": "array", + "items": { + "title": "Directory", + "type": "string" + }, + "uniqueItems": true + }, + "exclude": { + "title": "Exclude", + "description": "Exclude directories that match any of the specified exclude patterns.", + "type": "array", + "items": { + "title": "Directory", + "type": "string" + }, + "uniqueItems": true } }, - "collect_dir_size": { - "type": "boolean" - } + "required": [ + "include" + ] }, - "required": [ - "include", - "exclude" - ] + "collect_dir_size": { + "title": "Collect directory size?", + "description": "Enable the collection of directory sizes for each monitored directory. Enabling this option may introduce additional overhead on both Netdata and the host system, particularly if directories contain a large number of subdirectories and files.", + "type": "boolean", + "default": false + } } }, - "oneOf": [ - { - "required": [ - "name", - "files" - ] - }, - { - "required": [ - "name", - "dirs" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true } - ] + } } diff --git a/modules/filecheck/filecheck.go b/modules/filecheck/filecheck.go index e1369bc1c..30e4d921a 100644 --- a/modules/filecheck/filecheck.go +++ b/modules/filecheck/filecheck.go @@ -26,7 +26,7 @@ func init() { func New() *Filecheck { return &Filecheck{ Config: Config{ - DiscoveryEvery: web.Duration{Duration: time.Second * 30}, + DiscoveryEvery: web.Duration(time.Second * 30), Files: filesConfig{}, Dirs: dirsConfig{ CollectDirSize: true, @@ -39,18 +39,20 @@ func New() *Filecheck { type ( Config struct { - DiscoveryEvery web.Duration `yaml:"discovery_every"` - Files filesConfig `yaml:"files"` - Dirs dirsConfig `yaml:"dirs"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + DiscoveryEvery web.Duration `yaml:"discovery_every" json:"discovery_every"` + Files filesConfig `yaml:"files" json:"files"` + Dirs dirsConfig `yaml:"dirs" json:"dirs"` } filesConfig struct { - Include []string `yaml:"include"` - Exclude []string `yaml:"exclude"` + Include []string `yaml:"include" json:"include"` + Exclude []string `yaml:"exclude" json:"exclude"` } dirsConfig struct { - Include []string `yaml:"include"` - Exclude []string `yaml:"exclude"` - CollectDirSize bool `yaml:"collect_dir_size"` + Include []string `yaml:"include" json:"include"` + Exclude []string `yaml:"exclude" json:"exclude"` + CollectDirSize bool `yaml:"collect_dir_size" json:"collect_dir_size"` } ) @@ -69,30 +71,32 @@ type Filecheck struct { charts *module.Charts } -func (Filecheck) Cleanup() { +func (fc *Filecheck) Configuration() any { + return fc.Config } -func (fc *Filecheck) Init() bool { +func (fc *Filecheck) Init() error { err := fc.validateConfig() if err != nil { fc.Errorf("error on validating config: %v", err) - return false + return err } charts, err := fc.initCharts() if err != nil { fc.Errorf("error on charts initialization: %v", err) - return false + return err } fc.charts = charts fc.Debugf("monitored files: %v", fc.Files.Include) fc.Debugf("monitored dirs: %v", fc.Dirs.Include) - return true + + return nil } -func (fc Filecheck) Check() bool { - return true +func (fc *Filecheck) Check() error { + return nil } func (fc *Filecheck) Charts() *module.Charts { @@ -110,3 +114,6 @@ func (fc *Filecheck) Collect() map[string]int64 { } return ms } + +func (fc *Filecheck) Cleanup() { +} diff --git 
a/modules/filecheck/filecheck_test.go b/modules/filecheck/filecheck_test.go index 5024f6460..e973c26bc 100644 --- a/modules/filecheck/filecheck_test.go +++ b/modules/filecheck/filecheck_test.go @@ -86,9 +86,9 @@ func TestFilecheck_Init(t *testing.T) { fc.Config = test.config if test.wantFail { - assert.False(t, fc.Init()) + assert.Error(t, fc.Init()) } else { - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) assert.Equal(t, test.wantNumOfCharts, len(*fc.Charts())) } }) @@ -111,9 +111,9 @@ func TestFilecheck_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { fc := test.prepare() - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) - assert.True(t, fc.Check()) + assert.NoError(t, fc.Check()) }) } } @@ -226,7 +226,7 @@ func TestFilecheck_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { fc := test.prepare() - require.True(t, fc.Init()) + require.NoError(t, fc.Init()) collected := fc.Collect() diff --git a/modules/filecheck/init.go b/modules/filecheck/init.go index 858e3e503..b2e27459a 100644 --- a/modules/filecheck/init.go +++ b/modules/filecheck/init.go @@ -8,14 +8,14 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (fc Filecheck) validateConfig() error { +func (fc *Filecheck) validateConfig() error { if len(fc.Files.Include) == 0 && len(fc.Dirs.Include) == 0 { return errors.New("both 'files->include' and 'dirs->include' are empty") } return nil } -func (fc Filecheck) initCharts() (*module.Charts, error) { +func (fc *Filecheck) initCharts() (*module.Charts, error) { charts := &module.Charts{} if len(fc.Files.Include) > 0 { diff --git a/modules/filecheck/metadata.yaml b/modules/filecheck/metadata.yaml index d4e78cea1..57a121ec1 100644 --- a/modules/filecheck/metadata.yaml +++ b/modules/filecheck/metadata.yaml @@ -60,7 +60,7 @@ modules: default_value: 0 required: false - name: files - description: List of files to monitor. + description: Files matching the selector will be monitored. default_value: "" required: true detailed_description: | diff --git a/modules/fluentd/collect.go b/modules/fluentd/collect.go new file mode 100644 index 000000000..14ee6df68 --- /dev/null +++ b/modules/fluentd/collect.go @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import "fmt" + +func (f *Fluentd) collect() (map[string]int64, error) { + info, err := f.apiClient.getPluginsInfo() + if err != nil { + return nil, err + } + + mx := make(map[string]int64) + + for _, p := range info.Payload { + // TODO: if p.Category == "input" ? 
+ if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() { + continue + } + + if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) { + f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category) + continue + } + + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + mx[id+"_retry_count"] = *p.RetryCount + } + if p.hasBufferQueueLength() { + mx[id+"_buffer_queue_length"] = *p.BufferQueueLength + } + if p.hasBufferTotalQueuedSize() { + mx[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize + } + + if !f.activePlugins[id] { + f.activePlugins[id] = true + f.addPluginToCharts(p) + } + + } + + return mx, nil +} + +func (f *Fluentd) addPluginToCharts(p pluginData) { + id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) + + if p.hasCategory() { + chart := f.charts.Get("retry_count") + _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferQueueLength() { + chart := f.charts.Get("buffer_queue_length") + _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID}) + chart.MarkNotCreated() + } + if p.hasBufferTotalQueuedSize() { + chart := f.charts.Get("buffer_total_queued_size") + _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID}) + chart.MarkNotCreated() + } +} diff --git a/modules/fluentd/config_schema.json b/modules/fluentd/config_schema.json index f5bfe3047..3dab94d20 100644 --- a/modules/fluentd/config_schema.json +++ b/modules/fluentd/config_schema.json @@ -1,62 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/fluentd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Fluentd collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Fluentd built-in 
webserver.", + "type": "string", + "default": "http://127.0.0.1:24220" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "permit_plugin_id": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/fluentd/fluentd.go b/modules/fluentd/fluentd.go index 5b627b7b4..a18f7d4f8 100644 --- a/modules/fluentd/fluentd.go +++ b/modules/fluentd/fluentd.go @@ -4,7 +4,7 @@ package fluentd import ( _ "embed" - "fmt" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -23,145 +23,109 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:24220" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Fluentd with default values. func New() *Fluentd { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, - }, - }} - return &Fluentd{ - Config: config, + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:24220", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, + }}, activePlugins: make(map[string]bool), charts: charts.Copy(), } } type Config struct { - web.HTTP `yaml:",inline"` - PermitPlugin string `yaml:"permit_plugin_id"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` + PermitPlugin string `yaml:"permit_plugin_id" json:"permit_plugin"` } // Fluentd Fluentd module. type Fluentd struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + + apiClient *apiClient + charts *Charts permitPlugin matcher.Matcher - apiClient *apiClient activePlugins map[string]bool - charts *Charts } -// Cleanup makes cleanup. -func (Fluentd) Cleanup() {} +func (f *Fluentd) Configuration() any { + return f.Config +} // Init makes initialization. -func (f *Fluentd) Init() bool { - if f.URL == "" { - f.Error("URL not set") - return false +func (f *Fluentd) Init() error { + if err := f.validateConfig(); err != nil { + f.Error(err) + return err } - if f.PermitPlugin != "" { - m, err := matcher.NewSimplePatternsMatcher(f.PermitPlugin) - if err != nil { - f.Errorf("error on creating permit_plugin matcher : %v", err) - return false - } - f.permitPlugin = matcher.WithCache(m) + pm, err := f.initPermitPluginMatcher() + if err != nil { + f.Error(err) + return err + } + if pm != nil { + f.permitPlugin = pm } - client, err := web.NewHTTPClient(f.Client) + client, err := f.initApiClient() if err != nil { - f.Errorf("error on creating client : %v", err) - return false + f.Error(err) + return err } - - f.apiClient = newAPIClient(client, f.Request) + f.apiClient = client f.Debugf("using URL %s", f.URL) - f.Debugf("using timeout: %s", f.Timeout.Duration) + f.Debugf("using timeout: %s", f.Timeout.Duration()) - return true + return nil } // Check makes check. -func (f Fluentd) Check() bool { return len(f.Collect()) > 0 } +func (f *Fluentd) Check() error { + mx, err := f.collect() + if err != nil { + f.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil +} // Charts creates Charts. 
-func (f Fluentd) Charts() *Charts { return f.charts } +func (f *Fluentd) Charts() *Charts { + return f.charts +} // Collect collects metrics. func (f *Fluentd) Collect() map[string]int64 { - info, err := f.apiClient.getPluginsInfo() + mx, err := f.collect() if err != nil { f.Error(err) return nil } - metrics := make(map[string]int64) - - for _, p := range info.Payload { - // TODO: if p.Category == "input" ? - if !p.hasCategory() && !p.hasBufferQueueLength() && !p.hasBufferTotalQueuedSize() { - continue - } - - if f.permitPlugin != nil && !f.permitPlugin.MatchString(p.ID) { - f.Debugf("plugin id: '%s', type: '%s', category: '%s' denied", p.ID, p.Type, p.Category) - continue - } - - id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) - - if p.hasCategory() { - metrics[id+"_retry_count"] = *p.RetryCount - } - if p.hasBufferQueueLength() { - metrics[id+"_buffer_queue_length"] = *p.BufferQueueLength - } - if p.hasBufferTotalQueuedSize() { - metrics[id+"_buffer_total_queued_size"] = *p.BufferTotalQueuedSize - } - - if !f.activePlugins[id] { - f.activePlugins[id] = true - f.addPluginToCharts(p) - } - - } - - return metrics + return mx } -func (f *Fluentd) addPluginToCharts(p pluginData) { - id := fmt.Sprintf("%s_%s_%s", p.ID, p.Type, p.Category) - - if p.hasCategory() { - chart := f.charts.Get("retry_count") - _ = chart.AddDim(&Dim{ID: id + "_retry_count", Name: p.ID}) - chart.MarkNotCreated() - } - if p.hasBufferQueueLength() { - chart := f.charts.Get("buffer_queue_length") - _ = chart.AddDim(&Dim{ID: id + "_buffer_queue_length", Name: p.ID}) - chart.MarkNotCreated() - } - if p.hasBufferTotalQueuedSize() { - chart := f.charts.Get("buffer_total_queued_size") - _ = chart.AddDim(&Dim{ID: id + "_buffer_total_queued_size", Name: p.ID}) - chart.MarkNotCreated() +// Cleanup makes cleanup. 
+func (f *Fluentd) Cleanup() { + if f.apiClient != nil && f.apiClient.httpClient != nil { + f.apiClient.httpClient.CloseIdleConnections() } } diff --git a/modules/fluentd/fluentd_test.go b/modules/fluentd/fluentd_test.go index 492e2ebaa..51413d4bf 100644 --- a/modules/fluentd/fluentd_test.go +++ b/modules/fluentd/fluentd_test.go @@ -14,25 +14,16 @@ import ( var testDataPlugins, _ = os.ReadFile("testdata/plugins.json") -func TestNew(t *testing.T) { - job := New() - assert.IsType(t, (*Fluentd)(nil), job) - assert.NotNil(t, job.charts) - assert.NotNil(t, job.activePlugins) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestFluentd_Init(t *testing.T) { // OK job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) //NG job = New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestFluentd_Check(t *testing.T) { @@ -45,14 +36,14 @@ func TestFluentd_Check(t *testing.T) { // OK job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) // NG job = New() job.URL = "http://127.0.0.1:38001/api/plugins.json" - require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) } func TestFluentd_Charts(t *testing.T) { @@ -73,8 +64,8 @@ func TestFluentd_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "output_stdout_stdout_output_retry_count": 0, @@ -97,8 +88,8 @@ func TestFluentd_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestFluentd_404(t *testing.T) { @@ -110,6 +101,6 @@ func TestFluentd_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/fluentd/init.go b/modules/fluentd/init.go new file mode 100644 index 000000000..37627c03d --- /dev/null +++ b/modules/fluentd/init.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package fluentd + +import ( + "errors" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (f *Fluentd) validateConfig() error { + if f.URL == "" { + return errors.New("url not set") + } + + return nil +} + +func (f *Fluentd) initPermitPluginMatcher() (matcher.Matcher, error) { + if f.PermitPlugin == "" { + return nil, nil + } + + return matcher.NewSimplePatternsMatcher(f.PermitPlugin) +} + +func (f *Fluentd) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(f.Client) + if err != nil { + return nil, err + } + + return newAPIClient(client, f.Request), nil +} diff --git a/modules/freeradius/config_schema.json b/modules/freeradius/config_schema.json index b8bd25fa9..2e36fa42d 100644 --- a/modules/freeradius/config_schema.json +++ b/modules/freeradius/config_schema.json @@ -1,31 +1,38 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/freeradius job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": 
"go.d/freeradius job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "port": { + "type": "integer" + }, + "secret": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "address": { - "type": "string" - }, - "port": { - "type": "integer" - }, - "secret": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address", + "port", + "secret" + ] }, - "required": [ - "name", - "address", - "port", - "secret" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/freeradius/freeradius.go b/modules/freeradius/freeradius.go index 5897917cf..89fe9a63d 100644 --- a/modules/freeradius/freeradius.go +++ b/modules/freeradius/freeradius.go @@ -24,72 +24,68 @@ func init() { } func New() *FreeRADIUS { - cfg := Config{ - Address: "127.0.0.1", - Port: 18121, - Secret: "adminsecret", - Timeout: web.Duration{Duration: time.Second}, - } return &FreeRADIUS{ - Config: cfg, + Config: Config{ + Address: "127.0.0.1", + Port: 18121, + Secret: "adminsecret", + Timeout: web.Duration(time.Second), + }, } } +type Config struct { + Address string + Port int + Secret string + Timeout web.Duration +} + type ( - client interface { - Status() (*api.Status, error) - } - Config struct { - Address string - Port int - Secret string - Timeout web.Duration - } FreeRADIUS struct { module.Base Config `yaml:",inline"` client } + client interface { + Status() (*api.Status, error) + } ) -func (f FreeRADIUS) validateConfig() error { - if f.Address == "" { - return errors.New("address not set") - } - if f.Port == 0 { - return errors.New("port not set") - } - if f.Secret == "" { - return errors.New("secret not set") - } - return nil +func (f *FreeRADIUS) Configuration() any { + return f.Config } -func (f *FreeRADIUS) initClient() { +func (f *FreeRADIUS) Init() error { + if err := f.validateConfig(); err != nil { + f.Errorf("config validation: %v", err) + return err + } + f.client = api.New(api.Config{ Address: f.Address, Port: f.Port, Secret: f.Secret, - Timeout: f.Timeout.Duration, + Timeout: f.Timeout.Duration(), }) + + return nil } -func (f *FreeRADIUS) Init() bool { - err := f.validateConfig() +func (f *FreeRADIUS) Check() error { + mx, err := f.collect() if err != nil { - f.Errorf("error on validating config: %v", err) - return false + f.Error(err) + return err } + if len(mx) == 0 { + return errors.New("no metrics collected") - f.initClient() - return true -} - -func (f FreeRADIUS) Check() bool { - return len(f.Collect()) > 0 + } + return nil } -func (FreeRADIUS) Charts() *Charts { +func (f *FreeRADIUS) Charts() *Charts { return charts.Copy() } @@ -105,4 +101,4 @@ func (f *FreeRADIUS) Collect() map[string]int64 { return mx } -func (FreeRADIUS) Cleanup() {} +func (f *FreeRADIUS) Cleanup() {} diff --git a/modules/freeradius/freeradius_test.go b/modules/freeradius/freeradius_test.go index b9432ec96..79bba7002 100644 --- a/modules/freeradius/freeradius_test.go +++ b/modules/freeradius/freeradius_test.go @@ -19,42 +19,42 @@ func TestNew(t *testing.T) { func TestFreeRADIUS_Init(t *testing.T) { freeRADIUS := New() - assert.True(t, freeRADIUS.Init()) + assert.NoError(t, freeRADIUS.Init()) } func TestFreeRADIUS_Init_ReturnsFalseIfAddressNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Address = "" - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func 
TestFreeRADIUS_Init_ReturnsFalseIfPortNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Port = 0 - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func TestFreeRADIUS_Init_ReturnsFalseIfSecretNotSet(t *testing.T) { freeRADIUS := New() freeRADIUS.Secret = "" - assert.False(t, freeRADIUS.Init()) + assert.Error(t, freeRADIUS.Init()) } func TestFreeRADIUS_Check(t *testing.T) { freeRADIUS := New() freeRADIUS.client = newOKMockClient() - assert.True(t, freeRADIUS.Check()) + assert.NoError(t, freeRADIUS.Check()) } func TestFreeRADIUS_Check_ReturnsFalseIfClientStatusReturnsError(t *testing.T) { freeRADIUS := New() freeRADIUS.client = newErrorMockClient() - assert.False(t, freeRADIUS.Check()) + assert.Error(t, freeRADIUS.Check()) } func TestFreeRADIUS_Charts(t *testing.T) { diff --git a/modules/freeradius/init.go b/modules/freeradius/init.go new file mode 100644 index 000000000..9c14da0ea --- /dev/null +++ b/modules/freeradius/init.go @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package freeradius + +import ( + "errors" +) + +func (f *FreeRADIUS) validateConfig() error { + if f.Address == "" { + return errors.New("address not set") + } + if f.Port == 0 { + return errors.New("port not set") + } + if f.Secret == "" { + return errors.New("secret not set") + } + return nil +} diff --git a/modules/geth/config_schema.json b/modules/geth/config_schema.json index 78d3e0abb..9dbf6a8bd 100644 --- a/modules/geth/config_schema.json +++ b/modules/geth/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/geth job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Geth collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": 
"#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Geth Prometheus endpoint.", + "type": "string", + "default": "http://127.0.0.1:6060/debug/metrics/prometheus" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/geth/geth.go b/modules/geth/geth.go index fe6b2bd96..4b6bf7c52 100644 --- a/modules/geth/geth.go +++ b/modules/geth/geth.go @@ -24,68 +24,65 @@ func init() { } func New() *Geth { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:6060/debug/metrics/prometheus", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &Geth{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:6060/debug/metrics/prometheus", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &Geth{ - Config: config, charts: charts.Copy(), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - Geth struct { - module.Base - Config `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` +} - prom prometheus.Prometheus - charts *Charts - } -) +type Geth struct { + module.Base + Config `yaml:",inline" json:",inline"` -func (g Geth) validateConfig() error { - if g.URL == "" { - return errors.New("URL is not set") - } - return nil + prom prometheus.Prometheus + charts *Charts +} + +func (g *Geth) Configuration() any { + return g.Config } -func (g *Geth) initClient() error { - client, err := web.NewHTTPClient(g.Client) +func (g *Geth) Init() error { + if err := g.validateConfig(); err != nil { + g.Errorf("error on validating config: %g", err) + return err + } + + prom, err := g.initPrometheusClient() if err != nil { + g.Error(err) return err } + g.prom = prom - g.prom = prometheus.New(client, g.Request) return nil } -func (g *Geth) Init() bool { - if err := g.validateConfig(); err != nil { - g.Errorf("error on validating config: %g", err) - return false +func (g *Geth) Check() error { + mx, err := g.collect() + if err != nil { + g.Error(err) + return err } - if err := g.initClient(); err != nil { - g.Errorf("error on initializing client: %g", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (g *Geth) Check() bool { - return len(g.Collect()) > 0 + return nil } func (g *Geth) Charts() *Charts { @@ -104,4 +101,8 @@ func (g *Geth) Collect() map[string]int64 { return mx } -func (Geth) Cleanup() {} +func (g *Geth) Cleanup() { + if g.prom != nil && g.prom.HTTPClient() != nil { + g.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/geth/init.go b/modules/geth/init.go new file mode 100644 index 000000000..bf9a81712 --- /dev/null +++ b/modules/geth/init.go @@ -0,0 +1,24 @@ +package geth + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (g *Geth) validateConfig() error { + if g.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (g *Geth) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(g.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, g.Request), nil +} diff --git a/modules/haproxy/config_schema.json b/modules/haproxy/config_schema.json index 9fa8cd111..fa5a60188 100644 --- a/modules/haproxy/config_schema.json +++ b/modules/haproxy/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": 
"http://json-schema.org/draft-07/schema#", - "title": "go.d/haproxy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "HAProxy collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "HAProxy Prometheus endpoint URL.", + "type": "string", + "default": "http://127.0.0.1:8404/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + 
"description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/haproxy/haproxy.go b/modules/haproxy/haproxy.go index ffc936711..178a964fa 100644 --- a/modules/haproxy/haproxy.go +++ b/modules/haproxy/haproxy.go @@ -4,6 +4,7 @@ package haproxy import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Haproxy { URL: "http://127.0.0.1:8404/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -41,12 +42,14 @@ func New() *Haproxy { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Haproxy struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts @@ -55,24 +58,36 @@ type Haproxy struct { proxies map[string]bool } -func (h *Haproxy) Init() bool { +func (h *Haproxy) Configuration() any { + return h.Config +} + +func (h *Haproxy) Init() error { if err := h.validateConfig(); err != nil { h.Errorf("config validation: %v", err) - return false + return err } prom, err := h.initPrometheusClient() if err != nil { h.Errorf("prometheus client initialization: %v", err) - return false + return err } h.prom = prom - return true + return nil } -func (h *Haproxy) Check() bool { - return len(h.Collect()) > 0 +func (h *Haproxy) Check() error { + mx, err := h.collect() + if err != nil { + h.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (h *Haproxy) Charts() *module.Charts { @@ -80,18 +95,20 @@ func (h *Haproxy) Charts() *module.Charts { } func (h *Haproxy) Collect() map[string]int64 { - ms, err := h.collect() + mx, err := h.collect() if err != nil { 
h.Error(err) return nil } - if len(ms) == 0 { + if len(mx) == 0 { return nil } - return ms + return mx } -func (Haproxy) Cleanup() { - // TODO: close http idle connections +func (h *Haproxy) Cleanup() { + if h.prom != nil && h.prom.HTTPClient() != nil { + h.prom.HTTPClient().CloseIdleConnections() + } } diff --git a/modules/haproxy/haproxy_test.go b/modules/haproxy/haproxy_test.go index c881c19f3..df7c31ed3 100644 --- a/modules/haproxy/haproxy_test.go +++ b/modules/haproxy/haproxy_test.go @@ -62,9 +62,9 @@ func TestHaproxy_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -107,9 +107,9 @@ func TestHaproxy_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, h.Check()) + assert.Error(t, h.Check()) } else { - assert.True(t, h.Check()) + assert.NoError(t, h.Check()) } }) } @@ -185,7 +185,7 @@ func prepareCaseHaproxyV231Metrics(t *testing.T) (*Haproxy, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -213,7 +213,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -226,7 +226,7 @@ func prepareCase404Response(t *testing.T) (*Haproxy, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -235,7 +235,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Haproxy, func()) { t.Helper() h := New() h.URL = "http://127.0.0.1:38001" - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, func() {} } diff --git a/modules/hdfs/collect.go b/modules/hdfs/collect.go index 9879787cd..8d613e074 100644 --- a/modules/hdfs/collect.go +++ b/modules/hdfs/collect.go @@ -11,68 +11,51 @@ import ( "github.com/netdata/go.d.plugin/pkg/stm" ) -type ( - rawData map[string]json.RawMessage - rawJMX struct { - Beans []rawData +func (h *HDFS) collect() (map[string]int64, error) { + var raw rawJMX + err := h.client.doOKWithDecodeJSON(&raw) + if err != nil { + return nil, err } -) - -func (r rawJMX) isEmpty() bool { - return len(r.Beans) == 0 -} -func (r rawJMX) find(f func(rawData) bool) rawData { - for _, v := range r.Beans { - if f(v) { - return v - } + if raw.isEmpty() { + return nil, errors.New("empty response") } - return nil -} - -func (r rawJMX) findJvm() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" } - return r.find(f) -} - -func (r rawJMX) findRPCActivity() rawData { - f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") } - return r.find(f) -} - -func (r rawJMX) findFSNameSystem() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" } - return r.find(f) -} -func (r rawJMX) findFSDatasetState() rawData { - f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" } - return r.find(f) -} + mx := h.collectRawJMX(raw) -func (r rawJMX) findDataNodeActivity() rawData { - f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") } - return r.find(f) + return stm.ToMap(mx), nil } -func (h *HDFS) collect() (map[string]int64, error) { +func (h *HDFS) determineNodeType() (nodeType, error) { var raw rawJMX err := 
h.client.doOKWithDecodeJSON(&raw) if err != nil { - return nil, err + return "", err } if raw.isEmpty() { - return nil, errors.New("empty response") + return "", errors.New("empty response") } - mx := h.collectRawJMX(raw) + jvm := raw.findJvm() + if jvm == nil { + return "", errors.New("couldn't find jvm in response") + } - return stm.ToMap(mx), nil + v, ok := jvm["tag.ProcessName"] + if !ok { + return "", errors.New("couldn't find process name in JvmMetrics") + } + + t := nodeType(strings.Trim(string(v), "\"")) + if t == nameNodeType || t == dataNodeType { + return t, nil + } + return "", errors.New("unknown node type") } -func (h HDFS) collectRawJMX(raw rawJMX) *metrics { +func (h *HDFS) collectRawJMX(raw rawJMX) *metrics { var mx metrics switch h.nodeType { default: @@ -85,7 +68,7 @@ func (h HDFS) collectRawJMX(raw rawJMX) *metrics { return &mx } -func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) { +func (h *HDFS) collectNameNode(mx *metrics, raw rawJMX) { err := h.collectJVM(mx, raw) if err != nil { h.Debugf("error on collecting jvm : %v", err) @@ -102,7 +85,7 @@ func (h HDFS) collectNameNode(mx *metrics, raw rawJMX) { } } -func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) { +func (h *HDFS) collectDataNode(mx *metrics, raw rawJMX) { err := h.collectJVM(mx, raw) if err != nil { h.Debugf("error on collecting jvm : %v", err) @@ -124,7 +107,7 @@ func (h HDFS) collectDataNode(mx *metrics, raw rawJMX) { } } -func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectJVM(mx *metrics, raw rawJMX) error { v := raw.findJvm() if v == nil { return nil @@ -140,7 +123,7 @@ func (h HDFS) collectJVM(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { v := raw.findRPCActivity() if v == nil { return nil @@ -156,7 +139,7 @@ func (h HDFS) collectRPCActivity(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { v := raw.findFSNameSystem() if v == nil { return nil @@ -174,7 +157,7 @@ func (h HDFS) collectFSNameSystem(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { v := raw.findFSDatasetState() if v == nil { return nil @@ -193,7 +176,7 @@ func (h HDFS) collectFSDatasetState(mx *metrics, raw rawJMX) error { return nil } -func (h HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { +func (h *HDFS) collectDataNodeActivity(mx *metrics, raw rawJMX) error { v := raw.findDataNodeActivity() if v == nil { return nil diff --git a/modules/hdfs/config_schema.json b/modules/hdfs/config_schema.json index 483c49301..557fdde76 100644 --- a/modules/hdfs/config_schema.json +++ b/modules/hdfs/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/hdfs job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "HDFS 
collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the HDFS server built-in webserver.", + "type": "string", + "default": "http://127.0.0.1:50070/jmx" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + 
"tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/hdfs/hdfs.go b/modules/hdfs/hdfs.go index aa0b2efe2..1c939bdf2 100644 --- a/modules/hdfs/hdfs.go +++ b/modules/hdfs/hdfs.go @@ -5,7 +5,6 @@ package hdfs import ( _ "embed" "errors" - "strings" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -31,7 +30,8 @@ func New() *HDFS { URL: "http://127.0.0.1:50070/jmx", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}}, + Timeout: web.Duration(time.Second), + }, }, } @@ -49,84 +49,63 @@ const ( // Config is the HDFS module configuration. type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } // HDFS HDFS module. type HDFS struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` nodeType client *client } -// Cleanup makes cleanup. -func (HDFS) Cleanup() {} - -func (h HDFS) createClient() (*client, error) { - httpClient, err := web.NewHTTPClient(h.Client) - if err != nil { - return nil, err - } - - return newClient(httpClient, h.Request), nil +func (h *HDFS) Configuration() any { + return h.Config } -func (h HDFS) determineNodeType() (nodeType, error) { - var raw rawJMX - err := h.client.doOKWithDecodeJSON(&raw) - if err != nil { - return "", err - } - - if raw.isEmpty() { - return "", errors.New("empty response") - } - - jvm := raw.findJvm() - if jvm == nil { - return "", errors.New("couldn't find jvm in response") - } - - v, ok := jvm["tag.ProcessName"] - if !ok { - return "", errors.New("couldn't find process name in JvmMetrics") - } - - t := nodeType(strings.Trim(string(v), "\"")) - if t == nameNodeType || t == dataNodeType { - return t, nil +// Init makes initialization. +func (h *HDFS) Init() error { + if err := h.validateConfig(); err != nil { + h.Errorf("config validation: %v", err) + return err } - return "", errors.New("unknown node type") -} -// Init makes initialization. -func (h *HDFS) Init() bool { cl, err := h.createClient() if err != nil { h.Errorf("error on creating client : %v", err) - return false + return err } h.client = cl - return true + return nil } // Check makes check. 
-func (h *HDFS) Check() bool { - t, err := h.determineNodeType() +func (h *HDFS) Check() error { + typ, err := h.determineNodeType() if err != nil { h.Errorf("error on node type determination : %v", err) - return false + return err } - h.nodeType = t + h.nodeType = typ - return len(h.Collect()) > 0 + mx, err := h.collect() + if err != nil { + h.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts returns Charts. -func (h HDFS) Charts() *Charts { +func (h *HDFS) Charts() *Charts { switch h.nodeType { default: return nil @@ -151,3 +130,10 @@ func (h *HDFS) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (h *HDFS) Cleanup() { + if h.client != nil && h.client.httpClient != nil { + h.client.httpClient.CloseIdleConnections() + } +} diff --git a/modules/hdfs/hdfs_test.go b/modules/hdfs/hdfs_test.go index dc5b7cf0e..1870c1f0a 100644 --- a/modules/hdfs/hdfs_test.go +++ b/modules/hdfs/hdfs_test.go @@ -32,14 +32,14 @@ func TestNew(t *testing.T) { func TestHDFS_Init(t *testing.T) { job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestHDFS_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestHDFS_Check(t *testing.T) { @@ -52,9 +52,9 @@ func TestHDFS_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.NotZero(t, job.nodeType) } @@ -68,9 +68,9 @@ func TestHDFS_CheckDataNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.Equal(t, dataNodeType, job.nodeType) } @@ -84,9 +84,9 @@ func TestHDFS_CheckNameNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.Equal(t, nameNodeType, job.nodeType) } @@ -100,17 +100,17 @@ func TestHDFS_CheckErrorOnNodeTypeDetermination(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestHDFS_CheckNoResponse(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/jmx" - require.True(t, job.Init()) + require.NoError(t, job.Init()) - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestHDFS_Charts(t *testing.T) { @@ -151,8 +151,8 @@ func TestHDFS_CollectDataNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "dna_bytes_read": 80689178, @@ -203,8 +203,8 @@ func TestHDFS_CollectNameNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "fsns_blocks_total": 15, @@ -262,7 +262,7 @@ func TestHDFS_CollectUnknownNode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Panics(t, func() { _ = job.Collect() }) } @@ -270,7 +270,7 @@ func TestHDFS_CollectUnknownNode(t *testing.T) { func TestHDFS_CollectNoResponse(t *testing.T) { job := 
New() job.URL = "http://127.0.0.1:38001/jmx" - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } @@ -285,7 +285,7 @@ func TestHDFS_CollectReceiveInvalidResponse(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } @@ -300,7 +300,7 @@ func TestHDFS_CollectReceive404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Nil(t, job.Collect()) } diff --git a/modules/hdfs/init.go b/modules/hdfs/init.go new file mode 100644 index 000000000..2fbcfc32d --- /dev/null +++ b/modules/hdfs/init.go @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package hdfs + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (h *HDFS) validateConfig() error { + if h.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (h *HDFS) createClient() (*client, error) { + httpClient, err := web.NewHTTPClient(h.Client) + if err != nil { + return nil, err + } + + return newClient(httpClient, h.Request), nil +} diff --git a/modules/hdfs/raw_data.go b/modules/hdfs/raw_data.go new file mode 100644 index 000000000..ab434ae17 --- /dev/null +++ b/modules/hdfs/raw_data.go @@ -0,0 +1,51 @@ +package hdfs + +import ( + "encoding/json" + "strings" +) + +type ( + rawData map[string]json.RawMessage + rawJMX struct { + Beans []rawData + } +) + +func (r rawJMX) isEmpty() bool { + return len(r.Beans) == 0 +} + +func (r rawJMX) find(f func(rawData) bool) rawData { + for _, v := range r.Beans { + if f(v) { + return v + } + } + return nil +} + +func (r rawJMX) findJvm() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"JvmMetrics\"" } + return r.find(f) +} + +func (r rawJMX) findRPCActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"RpcActivityForPort") } + return r.find(f) +} + +func (r rawJMX) findFSNameSystem() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSNamesystem\"" } + return r.find(f) +} + +func (r rawJMX) findFSDatasetState() rawData { + f := func(data rawData) bool { return string(data["modelerType"]) == "\"FSDatasetState\"" } + return r.find(f) +} + +func (r rawJMX) findDataNodeActivity() rawData { + f := func(data rawData) bool { return strings.HasPrefix(string(data["modelerType"]), "\"DataNodeActivity") } + return r.find(f) +} diff --git a/modules/httpcheck/config_schema.json b/modules/httpcheck/config_schema.json index d344853f7..aec57c8e9 100644 --- a/modules/httpcheck/config_schema.json +++ b/modules/httpcheck/config_schema.json @@ -1,71 +1,78 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/httpcheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "accepted_statuses": { - "type": "array", - "items": { - "type": "integer" - } - }, - "response_match": { - "type": "string" - }, - "cookie_file": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": 
"go.d/httpcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "accepted_statuses": { + "type": "array", + "items": { + "type": "integer" + } + }, + "response_match": { + "type": "string" + }, + "cookie_file": { "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/httpcheck/httpcheck.go b/modules/httpcheck/httpcheck.go index abb2c821e..82ed0c7a9 100644 --- a/modules/httpcheck/httpcheck.go +++ b/modules/httpcheck/httpcheck.go @@ -4,6 +4,7 @@ package httpcheck import ( _ "embed" + "errors" "net/http" "regexp" "time" @@ -31,7 +32,7 @@ func New() *HTTPCheck { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, AcceptedStatuses: []int{200}, @@ -73,10 +74,14 @@ type HTTPCheck struct { metrics metrics } -func (hc *HTTPCheck) Init() bool { +func (hc *HTTPCheck) Configuration() any { + return hc.Config +} + +func (hc *HTTPCheck) Init() error { if err := hc.validateConfig(); err != nil { hc.Errorf("config validation: %v", err) - return false + return err } hc.charts = hc.initCharts() @@ -84,21 +89,21 @@ func (hc *HTTPCheck) Init() bool { httpClient, err := hc.initHTTPClient() if err != nil { hc.Errorf("init HTTP client: %v", err) - return false + return err } hc.httpClient = httpClient re, err := hc.initResponseMatchRegexp() if err != nil { hc.Errorf("init response match regexp: %v", err) - return false + return err } hc.reResponse = re hm, err := hc.initHeaderMatch() if err != nil { hc.Errorf("init header match: %v", err) - return false + return err } hc.headerMatch = hm @@ -107,17 +112,25 @@ func (hc *HTTPCheck) Init() bool { } hc.Debugf("using URL %s", hc.URL) - hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration) + hc.Debugf("using HTTP timeout %s", hc.Timeout.Duration()) hc.Debugf("using accepted HTTP statuses %v", hc.AcceptedStatuses) if hc.reResponse != nil { hc.Debugf("using response match regexp %s", hc.reResponse) } - return true + return nil } -func (hc *HTTPCheck) Check() bool { - return len(hc.Collect()) > 0 +func (hc *HTTPCheck) Check() error { + mx, err := hc.collect() + if err != nil { + hc.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (hc *HTTPCheck) Charts() *module.Charts { diff --git a/modules/httpcheck/httpcheck_test.go b/modules/httpcheck/httpcheck_test.go index 9d866e093..6c6f99243 100644 --- a/modules/httpcheck/httpcheck_test.go +++ b/modules/httpcheck/httpcheck_test.go @@ -56,9 +56,9 @@ func 
TestHTTPCheck_Init(t *testing.T) { httpCheck.Config = test.config if test.wantFail { - assert.False(t, httpCheck.Init()) + assert.Error(t, httpCheck.Init()) } else { - assert.True(t, httpCheck.Init()) + assert.NoError(t, httpCheck.Init()) } }) } @@ -80,7 +80,7 @@ func TestHTTPCheck_Charts(t *testing.T) { prepare: func(t *testing.T) *HTTPCheck { httpCheck := New() httpCheck.URL = "http://127.0.0.1:38001" - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) return httpCheck }, @@ -105,7 +105,7 @@ func TestHTTPCheck_Cleanup(t *testing.T) { assert.NotPanics(t, httpCheck.Cleanup) httpCheck.URL = "http://127.0.0.1:38001" - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) assert.NotPanics(t, httpCheck.Cleanup) } @@ -129,12 +129,12 @@ func TestHTTPCheck_Check(t *testing.T) { httpCheck, cleanup := test.prepare() defer cleanup() - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) if test.wantFail { - assert.False(t, httpCheck.Check()) + assert.Error(t, httpCheck.Check()) } else { - assert.True(t, httpCheck.Check()) + assert.NoError(t, httpCheck.Check()) } }) } @@ -438,7 +438,7 @@ func TestHTTPCheck_Collect(t *testing.T) { test.update(httpCheck) } - require.True(t, httpCheck.Init()) + require.NoError(t, httpCheck.Init()) var mx map[string]int64 @@ -475,11 +475,11 @@ func prepareSuccessCase() (*HTTPCheck, func()) { func prepareTimeoutCase() (*HTTPCheck, func()) { httpCheck := New() httpCheck.UpdateEvery = 1 - httpCheck.Timeout.Duration = time.Millisecond * 100 + httpCheck.Timeout = web.Duration(time.Millisecond * 100) srv := httptest.NewServer(http.HandlerFunc( func(w http.ResponseWriter, r *http.Request) { - time.Sleep(httpCheck.Timeout.Duration + time.Millisecond*100) + time.Sleep(httpCheck.Timeout.Duration() + time.Millisecond*100) })) httpCheck.URL = srv.URL diff --git a/modules/init.go b/modules/init.go index 9e44cf98a..5d510a465 100644 --- a/modules/init.go +++ b/modules/init.go @@ -5,82 +5,79 @@ package modules import ( _ "github.com/netdata/go.d.plugin/modules/activemq" _ "github.com/netdata/go.d.plugin/modules/apache" - _ "github.com/netdata/go.d.plugin/modules/bind" - _ "github.com/netdata/go.d.plugin/modules/cassandra" - _ "github.com/netdata/go.d.plugin/modules/chrony" - _ "github.com/netdata/go.d.plugin/modules/cockroachdb" - _ "github.com/netdata/go.d.plugin/modules/consul" - _ "github.com/netdata/go.d.plugin/modules/coredns" - _ "github.com/netdata/go.d.plugin/modules/couchbase" - _ "github.com/netdata/go.d.plugin/modules/couchdb" - _ "github.com/netdata/go.d.plugin/modules/dnsdist" - _ "github.com/netdata/go.d.plugin/modules/dnsmasq" - _ "github.com/netdata/go.d.plugin/modules/dnsmasq_dhcp" - _ "github.com/netdata/go.d.plugin/modules/dnsquery" - _ "github.com/netdata/go.d.plugin/modules/docker" - _ "github.com/netdata/go.d.plugin/modules/docker_engine" - _ "github.com/netdata/go.d.plugin/modules/dockerhub" - _ "github.com/netdata/go.d.plugin/modules/elasticsearch" - _ "github.com/netdata/go.d.plugin/modules/energid" - _ "github.com/netdata/go.d.plugin/modules/envoy" + //_ "github.com/netdata/go.d.plugin/modules/bind" + //_ "github.com/netdata/go.d.plugin/modules/cassandra" + //_ "github.com/netdata/go.d.plugin/modules/chrony" + //_ "github.com/netdata/go.d.plugin/modules/cockroachdb" + //_ "github.com/netdata/go.d.plugin/modules/consul" + //_ "github.com/netdata/go.d.plugin/modules/coredns" + //_ "github.com/netdata/go.d.plugin/modules/couchbase" + //_ "github.com/netdata/go.d.plugin/modules/couchdb" + //_ 
"github.com/netdata/go.d.plugin/modules/dnsdist" + //_ "github.com/netdata/go.d.plugin/modules/dnsmasq" + //_ "github.com/netdata/go.d.plugin/modules/dnsmasq_dhcp" + //_ "github.com/netdata/go.d.plugin/modules/dnsquery" + //_ "github.com/netdata/go.d.plugin/modules/docker" + //_ "github.com/netdata/go.d.plugin/modules/docker_engine" + //_ "github.com/netdata/go.d.plugin/modules/dockerhub" + //_ "github.com/netdata/go.d.plugin/modules/elasticsearch" + //_ "github.com/netdata/go.d.plugin/modules/envoy" _ "github.com/netdata/go.d.plugin/modules/example" - _ "github.com/netdata/go.d.plugin/modules/filecheck" - _ "github.com/netdata/go.d.plugin/modules/fluentd" - _ "github.com/netdata/go.d.plugin/modules/freeradius" - _ "github.com/netdata/go.d.plugin/modules/geth" - _ "github.com/netdata/go.d.plugin/modules/haproxy" - _ "github.com/netdata/go.d.plugin/modules/hdfs" - _ "github.com/netdata/go.d.plugin/modules/httpcheck" - _ "github.com/netdata/go.d.plugin/modules/isc_dhcpd" - _ "github.com/netdata/go.d.plugin/modules/k8s_kubelet" - _ "github.com/netdata/go.d.plugin/modules/k8s_kubeproxy" - _ "github.com/netdata/go.d.plugin/modules/k8s_state" - _ "github.com/netdata/go.d.plugin/modules/lighttpd" - _ "github.com/netdata/go.d.plugin/modules/logind" - _ "github.com/netdata/go.d.plugin/modules/logstash" - _ "github.com/netdata/go.d.plugin/modules/mongodb" - _ "github.com/netdata/go.d.plugin/modules/mysql" - _ "github.com/netdata/go.d.plugin/modules/nginx" - _ "github.com/netdata/go.d.plugin/modules/nginxplus" - _ "github.com/netdata/go.d.plugin/modules/nginxvts" - _ "github.com/netdata/go.d.plugin/modules/ntpd" - _ "github.com/netdata/go.d.plugin/modules/nvidia_smi" - _ "github.com/netdata/go.d.plugin/modules/nvme" - _ "github.com/netdata/go.d.plugin/modules/openvpn" - _ "github.com/netdata/go.d.plugin/modules/openvpn_status_log" - _ "github.com/netdata/go.d.plugin/modules/pgbouncer" - _ "github.com/netdata/go.d.plugin/modules/phpdaemon" - _ "github.com/netdata/go.d.plugin/modules/phpfpm" - _ "github.com/netdata/go.d.plugin/modules/pihole" - _ "github.com/netdata/go.d.plugin/modules/pika" - _ "github.com/netdata/go.d.plugin/modules/ping" - _ "github.com/netdata/go.d.plugin/modules/portcheck" - _ "github.com/netdata/go.d.plugin/modules/postgres" - _ "github.com/netdata/go.d.plugin/modules/powerdns" - _ "github.com/netdata/go.d.plugin/modules/powerdns_recursor" - _ "github.com/netdata/go.d.plugin/modules/prometheus" - _ "github.com/netdata/go.d.plugin/modules/proxysql" - _ "github.com/netdata/go.d.plugin/modules/pulsar" - _ "github.com/netdata/go.d.plugin/modules/rabbitmq" - _ "github.com/netdata/go.d.plugin/modules/redis" - _ "github.com/netdata/go.d.plugin/modules/scaleio" - _ "github.com/netdata/go.d.plugin/modules/snmp" - _ "github.com/netdata/go.d.plugin/modules/solr" - _ "github.com/netdata/go.d.plugin/modules/springboot2" - _ "github.com/netdata/go.d.plugin/modules/squidlog" - _ "github.com/netdata/go.d.plugin/modules/supervisord" - _ "github.com/netdata/go.d.plugin/modules/systemdunits" - _ "github.com/netdata/go.d.plugin/modules/tengine" - _ "github.com/netdata/go.d.plugin/modules/traefik" - _ "github.com/netdata/go.d.plugin/modules/unbound" - _ "github.com/netdata/go.d.plugin/modules/upsd" - _ "github.com/netdata/go.d.plugin/modules/vcsa" - _ "github.com/netdata/go.d.plugin/modules/vernemq" - _ "github.com/netdata/go.d.plugin/modules/vsphere" - _ "github.com/netdata/go.d.plugin/modules/weblog" - _ "github.com/netdata/go.d.plugin/modules/whoisquery" - _ 
"github.com/netdata/go.d.plugin/modules/windows" - _ "github.com/netdata/go.d.plugin/modules/wireguard" - _ "github.com/netdata/go.d.plugin/modules/x509check" - _ "github.com/netdata/go.d.plugin/modules/zookeeper" + //_ "github.com/netdata/go.d.plugin/modules/filecheck" + //_ "github.com/netdata/go.d.plugin/modules/fluentd" + //_ "github.com/netdata/go.d.plugin/modules/freeradius" + //_ "github.com/netdata/go.d.plugin/modules/geth" + //_ "github.com/netdata/go.d.plugin/modules/haproxy" + //_ "github.com/netdata/go.d.plugin/modules/hdfs" + //_ "github.com/netdata/go.d.plugin/modules/httpcheck" + //_ "github.com/netdata/go.d.plugin/modules/isc_dhcpd" + //_ "github.com/netdata/go.d.plugin/modules/k8s_kubelet" + //_ "github.com/netdata/go.d.plugin/modules/k8s_kubeproxy" + //_ "github.com/netdata/go.d.plugin/modules/k8s_state" + //_ "github.com/netdata/go.d.plugin/modules/lighttpd" + //_ "github.com/netdata/go.d.plugin/modules/logind" + //_ "github.com/netdata/go.d.plugin/modules/logstash" + //_ "github.com/netdata/go.d.plugin/modules/mongodb" + //_ "github.com/netdata/go.d.plugin/modules/mysql" + //_ "github.com/netdata/go.d.plugin/modules/nginx" + //_ "github.com/netdata/go.d.plugin/modules/nginxplus" + //_ "github.com/netdata/go.d.plugin/modules/nginxvts" + //_ "github.com/netdata/go.d.plugin/modules/ntpd" + //_ "github.com/netdata/go.d.plugin/modules/nvidia_smi" + //_ "github.com/netdata/go.d.plugin/modules/nvme" + //_ "github.com/netdata/go.d.plugin/modules/openvpn" + //_ "github.com/netdata/go.d.plugin/modules/openvpn_status_log" + //_ "github.com/netdata/go.d.plugin/modules/pgbouncer" + //_ "github.com/netdata/go.d.plugin/modules/phpdaemon" + //_ "github.com/netdata/go.d.plugin/modules/phpfpm" + //_ "github.com/netdata/go.d.plugin/modules/pihole" + //_ "github.com/netdata/go.d.plugin/modules/pika" + //_ "github.com/netdata/go.d.plugin/modules/ping" + //_ "github.com/netdata/go.d.plugin/modules/portcheck" + //_ "github.com/netdata/go.d.plugin/modules/postgres" + //_ "github.com/netdata/go.d.plugin/modules/powerdns" + //_ "github.com/netdata/go.d.plugin/modules/powerdns_recursor" + //_ "github.com/netdata/go.d.plugin/modules/prometheus" + //_ "github.com/netdata/go.d.plugin/modules/proxysql" + //_ "github.com/netdata/go.d.plugin/modules/pulsar" + //_ "github.com/netdata/go.d.plugin/modules/rabbitmq" + //_ "github.com/netdata/go.d.plugin/modules/redis" + //_ "github.com/netdata/go.d.plugin/modules/scaleio" + //_ "github.com/netdata/go.d.plugin/modules/snmp" + //_ "github.com/netdata/go.d.plugin/modules/squidlog" + //_ "github.com/netdata/go.d.plugin/modules/supervisord" + //_ "github.com/netdata/go.d.plugin/modules/systemdunits" + //_ "github.com/netdata/go.d.plugin/modules/tengine" + //_ "github.com/netdata/go.d.plugin/modules/traefik" + //_ "github.com/netdata/go.d.plugin/modules/unbound" + //_ "github.com/netdata/go.d.plugin/modules/upsd" + //_ "github.com/netdata/go.d.plugin/modules/vcsa" + //_ "github.com/netdata/go.d.plugin/modules/vernemq" + //_ "github.com/netdata/go.d.plugin/modules/vsphere" + //_ "github.com/netdata/go.d.plugin/modules/weblog" + //_ "github.com/netdata/go.d.plugin/modules/whoisquery" + //_ "github.com/netdata/go.d.plugin/modules/windows" + //_ "github.com/netdata/go.d.plugin/modules/wireguard" + //_ "github.com/netdata/go.d.plugin/modules/x509check" + //_ "github.com/netdata/go.d.plugin/modules/zookeeper" ) diff --git a/modules/isc_dhcpd/config_schema.json b/modules/isc_dhcpd/config_schema.json index ed860cbeb..329deece6 100644 --- 
a/modules/isc_dhcpd/config_schema.json +++ b/modules/isc_dhcpd/config_schema.json @@ -1,36 +1,43 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/isc_dhcpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "leases_path": { - "type": "string" - }, - "pools": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/isc_dhcpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "leases_path": { + "type": "string" + }, + "pools": { + "type": "array", + "items": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "networks": { + "type": "string" + } }, - "networks": { - "type": "string" - } - }, - "required": [ - "name", - "networks" - ] + "required": [ + "name", + "networks" + ] + } } - } + }, + "required": [ + "name", + "leases_path", + "pools" + ] }, - "required": [ - "name", - "leases_path", - "pools" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/isc_dhcpd/init.go b/modules/isc_dhcpd/init.go index 847a4590b..de26499b0 100644 --- a/modules/isc_dhcpd/init.go +++ b/modules/isc_dhcpd/init.go @@ -15,7 +15,7 @@ type ipPool struct { addresses iprange.Pool } -func (d DHCPd) validateConfig() error { +func (d *DHCPd) validateConfig() error { if d.Config.LeasesPath == "" { return errors.New("'lease_path' parameter not set") } @@ -33,7 +33,7 @@ func (d DHCPd) validateConfig() error { return nil } -func (d DHCPd) initPools() ([]ipPool, error) { +func (d *DHCPd) initPools() ([]ipPool, error) { var pools []ipPool for i, cfg := range d.Pools { rs, err := iprange.ParseRanges(cfg.Networks) @@ -50,7 +50,7 @@ func (d DHCPd) initPools() ([]ipPool, error) { return pools, nil } -func (d DHCPd) initCharts(pools []ipPool) (*module.Charts, error) { +func (d *DHCPd) initCharts(pools []ipPool) (*module.Charts, error) { charts := &module.Charts{} if err := charts.Add(activeLeasesTotalChart.Copy()); err != nil { diff --git a/modules/isc_dhcpd/isc_dhcpd.go b/modules/isc_dhcpd/isc_dhcpd.go index e1f4e5764..72ceaca2c 100644 --- a/modules/isc_dhcpd/isc_dhcpd.go +++ b/modules/isc_dhcpd/isc_dhcpd.go @@ -4,6 +4,7 @@ package isc_dhcpd import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -22,6 +23,16 @@ func init() { }) } +func New() *DHCPd { + return &DHCPd{ + Config: Config{ + LeasesPath: "/var/lib/dhcp/dhcpd.leases", + }, + + collected: make(map[string]int64), + } +} + type ( Config struct { LeasesPath string `yaml:"leases_path"` @@ -43,46 +54,47 @@ type DHCPd struct { collected map[string]int64 } -func New() *DHCPd { - return &DHCPd{ - Config: Config{ - LeasesPath: "/var/lib/dhcp/dhcpd.leases", - }, - - collected: make(map[string]int64), - } +func (d *DHCPd) Configuration() any { + return d.Config } -func (DHCPd) Cleanup() {} - -func (d *DHCPd) Init() bool { +func (d *DHCPd) Init() error { err := d.validateConfig() if err != nil { d.Errorf("config validation: %v", err) - return false + return err } pools, err := d.initPools() if err != nil { d.Errorf("ip pools init: %v", err) - return false + return err } d.pools = pools charts, err := d.initCharts(pools) if err != nil { d.Errorf("charts init: %v", err) - return false + return err } d.charts = charts d.Debugf("monitoring leases file: %v", d.Config.LeasesPath) d.Debugf("monitoring ip pools: %v", d.Config.Pools) 
- return true + + return nil } -func (d *DHCPd) Check() bool { - return len(d.Collect()) > 0 +func (d *DHCPd) Check() error { + mx, err := d.collect() + if err != nil { + d.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (d *DHCPd) Charts() *module.Charts { @@ -101,3 +113,5 @@ func (d *DHCPd) Collect() map[string]int64 { return mx } + +func (d *DHCPd) Cleanup() {} diff --git a/modules/isc_dhcpd/isc_dhcpd_test.go b/modules/isc_dhcpd/isc_dhcpd_test.go index 72980e469..ab03f3a0b 100644 --- a/modules/isc_dhcpd/isc_dhcpd_test.go +++ b/modules/isc_dhcpd/isc_dhcpd_test.go @@ -67,9 +67,9 @@ func TestDHCPd_Init(t *testing.T) { dhcpd.Config = test.config if test.wantFail { - assert.False(t, dhcpd.Init()) + assert.Error(t, dhcpd.Init()) } else { - assert.True(t, dhcpd.Init()) + assert.NoError(t, dhcpd.Init()) } }) } @@ -91,12 +91,12 @@ func TestDHCPd_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dhcpd := test.prepare() - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) if test.wantFail { - assert.False(t, dhcpd.Check()) + assert.Error(t, dhcpd.Check()) } else { - assert.True(t, dhcpd.Check()) + assert.NoError(t, dhcpd.Check()) } }) } @@ -108,7 +108,7 @@ func TestDHCPd_Charts(t *testing.T) { dhcpd.Pools = []PoolConfig{ {Name: "name", Networks: "192.0.2.0/24"}, } - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) assert.NotNil(t, dhcpd.Charts()) } @@ -209,7 +209,7 @@ func TestDHCPd_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { dhcpd := test.prepare() - require.True(t, dhcpd.Init()) + require.NoError(t, dhcpd.Init()) collected := dhcpd.Collect() diff --git a/modules/k8s_kubelet/config_schema.json b/modules/k8s_kubelet/config_schema.json index 6e42187f2..a581764f4 100644 --- a/modules/k8s_kubelet/config_schema.json +++ b/modules/k8s_kubelet/config_schema.json @@ -1,62 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_kubelet job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kubelet collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + 
}, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Kubelet metrics endpoint.", + "type": "string", + "default": "http://127.0.0.1:10255/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "token_path": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/k8s_kubelet/init.go b/modules/k8s_kubelet/init.go new file mode 100644 index 000000000..f9fcda8ce --- /dev/null +++ b/modules/k8s_kubelet/init.go @@ -0,0 +1,35 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubelet + +import ( + "errors" + "os" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (k *Kubelet) validateConfig() error { + if k.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (k *Kubelet) initAuthToken() string { + bs, err := os.ReadFile(k.TokenPath) + if err != nil { + k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err) + } + return string(bs) +} + +func (k *Kubelet) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(k.Client) + if err != nil { + return nil, err + } + + return prometheus.New(httpClient, k.Request), nil +} diff --git a/modules/k8s_kubelet/kubelet.go b/modules/k8s_kubelet/kubelet.go index 7f62c9f30..b88e62817 100644 --- a/modules/k8s_kubelet/kubelet.go +++ b/modules/k8s_kubelet/kubelet.go @@ -4,7 +4,7 @@ package k8s_kubelet import ( _ "embed" - "os" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/prometheus" @@ -29,72 +29,82 @@ func init() { // New creates Kubelet with default values. func New() *Kubelet { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:10255/metrics", - Headers: make(map[string]string), - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &Kubelet{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:10255/metrics", + Headers: make(map[string]string), + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, + TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", }, - TokenPath: "/var/run/secrets/kubernetes.io/serviceaccount/token", - } - return &Kubelet{ - Config: config, charts: charts.Copy(), collectedVMPlugins: make(map[string]bool), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - TokenPath string `yaml:"token_path"` - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - Kubelet struct { - module.Base - Config `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` + TokenPath string `yaml:"token_path" json:"token_path"` +} - prom prometheus.Prometheus - charts *Charts - // volume_manager_total_volumes - collectedVMPlugins map[string]bool - } -) +type Kubelet struct { + module.Base + Config `yaml:",inline" json:",inline"` -// Cleanup makes cleanup. -func (Kubelet) Cleanup() {} + prom prometheus.Prometheus + charts *Charts + // volume_manager_total_volumes + collectedVMPlugins map[string]bool +} + +func (k *Kubelet) Configuration() any { + return k.Config +} // Init makes initialization. 
-func (k *Kubelet) Init() bool { - b, err := os.ReadFile(k.TokenPath) - if err != nil { - k.Warningf("error on reading service account token from '%s': %v", k.TokenPath, err) - } else { - k.Request.Headers["Authorization"] = "Bearer " + string(b) +func (k *Kubelet) Init() error { + if err := k.validateConfig(); err != nil { + k.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(k.Client) + prom, err := k.initPrometheusClient() if err != nil { - k.Errorf("error on creating http client: %v", err) - return false + k.Error(err) + return err + } + k.prom = prom + + if tok := k.initAuthToken(); tok != "" { + k.Request.Headers["Authorization"] = "Bearer " + tok } - k.prom = prometheus.New(client, k.Request) - return true + return nil } // Check makes check. -func (k *Kubelet) Check() bool { - return len(k.Collect()) > 0 +func (k *Kubelet) Check() error { + mx, err := k.collect() + if err != nil { + k.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. -func (k Kubelet) Charts() *Charts { +func (k *Kubelet) Charts() *Charts { return k.charts } @@ -109,3 +119,10 @@ func (k *Kubelet) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (k *Kubelet) Cleanup() { + if k.prom != nil && k.prom.HTTPClient() != nil { + k.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/k8s_kubelet/kubelet_test.go b/modules/k8s_kubelet/kubelet_test.go index a69a0724b..42331b891 100644 --- a/modules/k8s_kubelet/kubelet_test.go +++ b/modules/k8s_kubelet/kubelet_test.go @@ -37,14 +37,14 @@ func TestKubelet_Cleanup(t *testing.T) { } func TestKubelet_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestKubelet_Init_ReadServiceAccountToken(t *testing.T) { job := New() job.TokenPath = "testdata/token.txt" - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.Equal(t, "Bearer "+string(testTokenData), job.Request.Headers["Authorization"]) } @@ -52,7 +52,7 @@ func TestKubelet_InitErrorOnCreatingClientWrongTLSCA(t *testing.T) { job := New() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestKubelet_Check(t *testing.T) { @@ -65,15 +65,15 @@ func TestKubelet_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestKubelet_Check_ConnectionRefused(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubelet_Collect(t *testing.T) { @@ -86,8 +86,8 @@ func TestKubelet_Collect(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "apiserver_audit_requests_rejected_total": 0, @@ -185,8 +185,8 @@ func TestKubelet_Collect_ReceiveInvalidResponse(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubelet_Collect_Receive404(t *testing.T) { @@ -199,6 +199,6 @@ func TestKubelet_Collect_Receive404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, 
job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/k8s_kubeproxy/config_schema.json b/modules/k8s_kubeproxy/config_schema.json index c26231397..c164c5319 100644 --- a/modules/k8s_kubeproxy/config_schema.json +++ b/modules/k8s_kubeproxy/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_kubeproxy job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kubeproxy collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "Kubeproxy metrics endpoint URL.", + "type": "string", + "default": "http://127.0.0.1:10249/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": 
"Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/k8s_kubeproxy/init.go b/modules/k8s_kubeproxy/init.go new file mode 100644 index 000000000..39b46d353 --- /dev/null +++ b/modules/k8s_kubeproxy/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package k8s_kubeproxy + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (kp *KubeProxy) validateConfig() error { + if kp.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (kp *KubeProxy) initPrometheusClient() (prometheus.Prometheus, error) { + httpClient, err := web.NewHTTPClient(kp.Client) + if err != nil { + return nil, err + } + + return prometheus.New(httpClient, kp.Request), nil +} diff --git a/modules/k8s_kubeproxy/kubeproxy.go b/modules/k8s_kubeproxy/kubeproxy.go index a681619c4..d089f9375 100644 --- a/modules/k8s_kubeproxy/kubeproxy.go +++ b/modules/k8s_kubeproxy/kubeproxy.go @@ -4,17 +4,12 @@ package k8s_kubeproxy import ( _ "embed" + "errors" "time" + "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/prometheus" "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -const ( - defaultURL = "http://127.0.0.1:10249/metrics" - defaultHTTPTimeout = time.Second * 2 ) //go:embed "config_schema.json" @@ -33,64 +28,73 @@ func init() { // New creates KubeProxy with default values. func New() *KubeProxy { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &KubeProxy{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:10249/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, - } - return &KubeProxy{ - Config: config, charts: charts.Copy(), } } // Config is the KubeProxy module configuration. type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } // KubeProxy is KubeProxy module. type KubeProxy struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` prom prometheus.Prometheus charts *Charts } -// Cleanup makes cleanup. -func (KubeProxy) Cleanup() {} +func (kp *KubeProxy) Configuration() any { + return kp.Config +} // Init makes initialization. -func (kp *KubeProxy) Init() bool { - if kp.URL == "" { - kp.Error("URL not set") - return false +func (kp *KubeProxy) Init() error { + if err := kp.validateConfig(); err != nil { + kp.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(kp.Client) + prom, err := kp.initPrometheusClient() if err != nil { - kp.Errorf("error on creating http client : %v", err) - return false + kp.Error(err) + return err } + kp.prom = prom - kp.prom = prometheus.New(client, kp.Request) - - return true + return nil } // Check makes check. -func (kp *KubeProxy) Check() bool { - return len(kp.Collect()) > 0 +func (kp *KubeProxy) Check() error { + mx, err := kp.collect() + if err != nil { + kp.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. 
-func (kp KubeProxy) Charts() *Charts { +func (kp *KubeProxy) Charts() *Charts { return kp.charts } @@ -105,3 +109,10 @@ func (kp *KubeProxy) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (kp *KubeProxy) Cleanup() { + if kp.prom != nil && kp.prom.HTTPClient() != nil { + kp.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/k8s_kubeproxy/kubeproxy_test.go b/modules/k8s_kubeproxy/kubeproxy_test.go index 4c1831a99..f2346055e 100644 --- a/modules/k8s_kubeproxy/kubeproxy_test.go +++ b/modules/k8s_kubeproxy/kubeproxy_test.go @@ -14,24 +14,22 @@ import ( var testMetrics, _ = os.ReadFile("testdata/metrics.txt") -func TestNew(t *testing.T) { - job := New() - - assert.IsType(t, (*KubeProxy)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestKubeProxy_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) } -func TestKubeProxy_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } - -func TestKubeProxy_Cleanup(t *testing.T) { New().Cleanup() } +func TestKubeProxy_Cleanup(t *testing.T) { + New().Cleanup() +} -func TestKubeProxy_Init(t *testing.T) { assert.True(t, New().Init()) } +func TestKubeProxy_Init(t *testing.T) { + assert.NoError(t, New().Init()) +} func TestKubeProxy_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestKubeProxy_Check(t *testing.T) { @@ -44,15 +42,15 @@ func TestKubeProxy_Check(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestKubeProxy_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubeProxy_Collect(t *testing.T) { @@ -65,8 +63,8 @@ func TestKubeProxy_Collect(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "sync_proxy_rules_count": 2669, @@ -108,8 +106,8 @@ func TestKubeProxy_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestKubeProxy_404(t *testing.T) { @@ -122,6 +120,6 @@ func TestKubeProxy_404(t *testing.T) { job := New() job.URL = ts.URL + "/metrics" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/k8s_state/config_schema.json b/modules/k8s_state/config_schema.json index 42b6b0fd6..435b0748b 100644 --- a/modules/k8s_state/config_schema.json +++ b/modules/k8s_state/config_schema.json @@ -1,13 +1,21 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/k8s_state job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Kubernetes Cluster State collector configuration.", + "type": "object", + "properties": { + "update_every": { + "title": "Update Every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" + } } }, - "required": [ - "name" - ] + "uiSchema": 
{ + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/k8s_state/kube_state.go b/modules/k8s_state/kube_state.go index 3a3046e47..ab43a689a 100644 --- a/modules/k8s_state/kube_state.go +++ b/modules/k8s_state/kube_state.go @@ -5,6 +5,8 @@ package k8s_state import ( "context" _ "embed" + "errors" + "fmt" "sync" "time" @@ -37,15 +39,14 @@ func New() *KubeState { } } -type ( - discoverer interface { - run(ctx context.Context, in chan<- resource) - ready() bool - stopped() bool - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` +} +type ( KubeState struct { module.Base + Config `yaml:",inline" json:",inline"` newKubeClient func() (kubernetes.Interface, error) @@ -65,13 +66,22 @@ type ( kubeClusterID string kubeClusterName string } + discoverer interface { + run(ctx context.Context, in chan<- resource) + ready() bool + stopped() bool + } ) -func (ks *KubeState) Init() bool { +func (ks *KubeState) Configuration() any { + return ks.Config +} + +func (ks *KubeState) Init() error { client, err := ks.initClient() if err != nil { ks.Errorf("client initialization: %v", err) - return false + return err } ks.client = client @@ -79,23 +89,25 @@ func (ks *KubeState) Init() bool { ks.discoverer = ks.initDiscoverer(ks.client) - return true + return nil } -func (ks *KubeState) Check() bool { +func (ks *KubeState) Check() error { if ks.client == nil || ks.discoverer == nil { ks.Error("not initialized job") - return false + return errors.New("not initialized") } ver, err := ks.client.Discovery().ServerVersion() if err != nil { - ks.Errorf("failed to connect to the Kubernetes API server: %v", err) - return false + err := fmt.Errorf("failed to connect to K8s API server: %v", err) + ks.Error(err) + return err } ks.Infof("successfully connected to the Kubernetes API server '%s'", ver) - return true + + return nil } func (ks *KubeState) Charts() *module.Charts { @@ -123,7 +135,7 @@ func (ks *KubeState) Cleanup() { c := make(chan struct{}) go func() { defer close(c); ks.wg.Wait() }() - t := time.NewTimer(time.Second * 3) + t := time.NewTimer(time.Second * 5) defer t.Stop() select { diff --git a/modules/k8s_state/kube_state_test.go b/modules/k8s_state/kube_state_test.go index 451028532..7bb4aa426 100644 --- a/modules/k8s_state/kube_state_test.go +++ b/modules/k8s_state/kube_state_test.go @@ -55,9 +55,9 @@ func TestKubeState_Init(t *testing.T) { ks := test.prepare() if test.wantFail { - assert.False(t, ks.Init()) + assert.Error(t, ks.Init()) } else { - assert.True(t, ks.Init()) + assert.NoError(t, ks.Init()) } }) } @@ -90,12 +90,12 @@ func TestKubeState_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { ks := test.prepare() - require.True(t, ks.Init()) + require.NoError(t, ks.Init()) if test.wantFail { - assert.False(t, ks.Check()) + assert.Error(t, ks.Check()) } else { - assert.True(t, ks.Check()) + assert.NoError(t, ks.Check()) } }) } @@ -663,8 +663,8 @@ func TestKubeState_Collect(t *testing.T) { ks := New() ks.newKubeClient = func() (kubernetes.Interface, error) { return test.client, nil } - require.True(t, ks.Init()) - require.True(t, ks.Check()) + require.NoError(t, ks.Init()) + require.NoError(t, ks.Check()) defer ks.Cleanup() for i, executeStep := range test.steps { diff --git a/modules/lighttpd/config_schema.json b/modules/lighttpd/config_schema.json index c1b51d065..187335b86 100644 --- a/modules/lighttpd/config_schema.json +++ b/modules/lighttpd/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": 
"http://json-schema.org/draft-07/schema#", - "title": "go.d/lighttpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Lighttpd collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Lighttpd server status page to monitor.", + "type": "string", + "default": "http://127.0.0.1/server-status?auto" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + 
"title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/lighttpd/init.go b/modules/lighttpd/init.go new file mode 100644 index 000000000..f9f4baf37 --- /dev/null +++ b/modules/lighttpd/init.go @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package lighttpd + +import ( + "errors" + "fmt" + "strings" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (l *Lighttpd) validateConfig() error { + if l.URL == "" { + return errors.New("url not set") + } + if !strings.HasSuffix(l.URL, "?auto") { + return fmt.Errorf("bad URL '%s', should ends in '?auto'", l.URL) + } + return nil +} + +func (l *Lighttpd) initApiClient() (*apiClient, error) { + client, err := web.NewHTTPClient(l.Client) + if err != nil { + return nil, err + } + return newAPIClient(client, l.Request), nil +} diff --git a/modules/lighttpd/lighttpd.go b/modules/lighttpd/lighttpd.go index 2f98a96bf..58a93e0ff 100644 --- a/modules/lighttpd/lighttpd.go +++ b/modules/lighttpd/lighttpd.go @@ -4,7 +4,7 @@ package lighttpd import ( _ "embed" - "strings" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -22,70 +22,75 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/server-status?auto" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Lighttpd with default values. func New() *Lighttpd { - config := Config{ + return &Lighttpd{Config: Config{ HTTP: web.HTTP{ Request: web.Request{ - URL: defaultURL, + URL: "http://127.0.0.1/server-status?auto", }, Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + Timeout: web.Duration(time.Second * 2), }, }, - } - return &Lighttpd{Config: config} + }} } // Config is the Lighttpd module configuration. 
type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Lighttpd struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + apiClient *apiClient } -// Cleanup makes cleanup. -func (Lighttpd) Cleanup() {} +func (l *Lighttpd) Configuration() any { + return l.Config +} // Init makes initialization. -func (l *Lighttpd) Init() bool { - if l.URL == "" { - l.Error("URL not set") - return false - } - - if !strings.HasSuffix(l.URL, "?auto") { - l.Errorf("bad URL '%s', should ends in '?auto'", l.URL) - return false +func (l *Lighttpd) Init() error { + if err := l.validateConfig(); err != nil { + l.Errorf("config validation: %v", err) + return err } - client, err := web.NewHTTPClient(l.Client) + client, err := l.initApiClient() if err != nil { - l.Errorf("error on creating http client : %v", err) - return false + l.Error(err) + return err } - l.apiClient = newAPIClient(client, l.Request) + l.apiClient = client l.Debugf("using URL %s", l.URL) - l.Debugf("using timeout: %s", l.Timeout.Duration) + l.Debugf("using timeout: %s", l.Timeout.Duration()) - return true + return nil } // Check makes check -func (l *Lighttpd) Check() bool { return len(l.Collect()) > 0 } +func (l *Lighttpd) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil +} // Charts returns Charts. -func (l Lighttpd) Charts() *Charts { return charts.Copy() } +func (l Lighttpd) Charts() *Charts { + return charts.Copy() +} // Collect collects metrics. func (l *Lighttpd) Collect() map[string]int64 { @@ -98,3 +103,10 @@ func (l *Lighttpd) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. 
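The lighttpd hunks above also show the pkg/web timeout change that repeats through the rest of the patch: web.Duration is now constructed by plain type conversion and read back through its Duration() method rather than a struct field. A small sketch under that assumption, using only the calls visible in the hunks (the function name is made up):

package example

import (
	"fmt"
	"time"

	"github.com/netdata/go.d.plugin/pkg/web"
)

// exampleTimeout mirrors the pattern in the diff:
//   construct: web.Duration(time.Second * 2)   (was web.Duration{Duration: ...})
//   read back: cfg.Timeout.Duration()          (was the cfg.Timeout.Duration field)
func exampleTimeout() {
	cfg := web.Client{Timeout: web.Duration(time.Second * 2)}
	fmt.Printf("using timeout: %s\n", cfg.Timeout.Duration())
}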
+func (l *Lighttpd) Cleanup() { + if l.apiClient != nil && l.apiClient.httpClient != nil { + l.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/lighttpd/lighttpd_test.go b/modules/lighttpd/lighttpd_test.go index e6a7b016e..781e2db97 100644 --- a/modules/lighttpd/lighttpd_test.go +++ b/modules/lighttpd/lighttpd_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -20,18 +19,10 @@ var ( func TestLighttpd_Cleanup(t *testing.T) { New().Cleanup() } -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestLighttpd_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -39,7 +30,7 @@ func TestLighttpd_InitNG(t *testing.T) { job := New() job.URL = "" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestLighttpd_Check(t *testing.T) { @@ -52,16 +43,16 @@ func TestLighttpd_Check(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestLighttpd_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/server-status?auto" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestLighttpd_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } @@ -76,8 +67,8 @@ func TestLighttpd_Collect(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "scoreboard_waiting": 125, @@ -113,8 +104,8 @@ func TestLighttpd_InvalidData(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestLighttpd_ApacheData(t *testing.T) { @@ -127,8 +118,8 @@ func TestLighttpd_ApacheData(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - require.False(t, job.Check()) + require.NoError(t, job.Init()) + require.Error(t, job.Check()) } func TestLighttpd_404(t *testing.T) { @@ -141,6 +132,6 @@ func TestLighttpd_404(t *testing.T) { job := New() job.URL = ts.URL + "/server-status?auto" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/logind/config_schema.json b/modules/logind/config_schema.json index b7ad53e9a..c20cf4611 100644 --- a/modules/logind/config_schema.json +++ b/modules/logind/config_schema.json @@ -1,19 +1,26 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/logind job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/logind job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name" + ] }, - 
"required": [ - "name" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/logind/logind.go b/modules/logind/logind.go index 456217e9f..7cbb33971 100644 --- a/modules/logind/logind.go +++ b/modules/logind/logind.go @@ -7,6 +7,7 @@ package logind import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,10 +30,10 @@ func init() { func New() *Logind { return &Logind{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, newLogindConn: func(cfg Config) (logindConnection, error) { - return newLogindConnection(cfg.Timeout.Duration) + return newLogindConnection(cfg.Timeout.Duration()) }, charts: charts.Copy(), } @@ -51,12 +52,24 @@ type Logind struct { charts *module.Charts } -func (l *Logind) Init() bool { - return true +func (l *Logind) Configuration() any { + return l.Config } -func (l *Logind) Check() bool { - return len(l.Collect()) > 0 +func (l *Logind) Init() error { + return nil +} + +func (l *Logind) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (l *Logind) Charts() *module.Charts { diff --git a/modules/logind/logind_test.go b/modules/logind/logind_test.go index 07b00c168..7aa35672a 100644 --- a/modules/logind/logind_test.go +++ b/modules/logind/logind_test.go @@ -32,9 +32,9 @@ func TestLogind_Init(t *testing.T) { l.Config = test.config if test.wantFail { - assert.False(t, l.Init()) + assert.Error(t, l.Init()) } else { - assert.True(t, l.Init()) + assert.NoError(t, l.Init()) } }) } @@ -55,15 +55,15 @@ func TestLogind_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(l *Logind) { l.Init() }, + prepare: func(l *Logind) { _ = l.Init() }, }, "after Check": { wantClose: true, - prepare: func(l *Logind) { l.Init(); l.Check() }, + prepare: func(l *Logind) { _ = l.Init(); _ = l.Check() }, }, "after Collect": { wantClose: true, - prepare: func(l *Logind) { l.Init(); l.Collect() }, + prepare: func(l *Logind) { _ = l.Init(); l.Collect() }, }, } @@ -119,13 +119,13 @@ func TestLogind_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { l := New() - require.True(t, l.Init()) + require.NoError(t, l.Init()) l.conn = test.prepare() if test.wantFail { - assert.False(t, l.Check()) + assert.Error(t, l.Check()) } else { - assert.True(t, l.Check()) + assert.NoError(t, l.Check()) } }) } @@ -193,7 +193,7 @@ func TestLogind_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { l := New() - require.True(t, l.Init()) + require.NoError(t, l.Init()) l.conn = test.prepare() mx := l.Collect() diff --git a/modules/logstash/config_schema.json b/modules/logstash/config_schema.json index 9e4d59642..9db808d1c 100644 --- a/modules/logstash/config_schema.json +++ b/modules/logstash/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/logstash job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + 
"title": "Logstash collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Logstash monitoring API.", + "type": "string", + "default": "http://localhost:9600" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + 
}, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/logstash/logstash.go b/modules/logstash/logstash.go index 728267294..a3776c00f 100644 --- a/modules/logstash/logstash.go +++ b/modules/logstash/logstash.go @@ -4,6 +4,7 @@ package logstash import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *Logstash { URL: "http://localhost:9600", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -39,37 +40,54 @@ func New() *Logstash { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Logstash struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + httpClient *http.Client charts *module.Charts - pipelines map[string]bool + + pipelines map[string]bool } -func (l *Logstash) Init() bool { +func (l *Logstash) Configuration() any { + return l.Config +} + +func (l *Logstash) Init() error { if l.URL == "" { l.Error("config validation: 'url' cannot be empty") - return false + return errors.New("url not set") } httpClient, err := web.NewHTTPClient(l.Client) if err != nil { l.Errorf("init HTTP client: %v", err) - return false + return err } l.httpClient = httpClient l.Debugf("using URL %s", l.URL) - l.Debugf("using timeout: %s", l.Timeout.Duration) - return true + l.Debugf("using timeout: %s", l.Timeout.Duration()) + + return nil } -func (l *Logstash) Check() bool { - return len(l.Collect()) > 0 +func (l *Logstash) Check() error { + mx, err := l.collect() + if err != nil { + l.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (l *Logstash) Charts() *module.Charts { diff --git a/modules/logstash/logstash_test.go b/modules/logstash/logstash_test.go index 2b5fd32d5..81e86c414 100644 --- a/modules/logstash/logstash_test.go +++ b/modules/logstash/logstash_test.go @@ -52,9 +52,9 @@ func TestLogstash_Init(t *testing.T) { ls.Config = test.config if test.wantFail { - assert.False(t, ls.Init()) + assert.Error(t, ls.Init()) } else { - assert.True(t, ls.Init()) + assert.NoError(t, ls.Init()) } }) } @@ -97,9 +97,9 @@ func TestLogstash_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, ls.Check()) + assert.Error(t, ls.Check()) } 
else { - assert.True(t, ls.Check()) + assert.NoError(t, ls.Check()) } }) } @@ -202,7 +202,7 @@ func caseValidResponse(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } @@ -215,7 +215,7 @@ func caseInvalidDataResponse(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } @@ -224,7 +224,7 @@ func caseConnectionRefused(t *testing.T) (*Logstash, func()) { t.Helper() ls := New() ls.URL = "http://127.0.0.1:65001" - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, func() {} } @@ -237,7 +237,7 @@ func case404(t *testing.T) (*Logstash, func()) { })) ls := New() ls.URL = srv.URL - require.True(t, ls.Init()) + require.NoError(t, ls.Init()) return ls, srv.Close } diff --git a/modules/mongodb/config_schema.json b/modules/mongodb/config_schema.json index 48afef584..a611c12c2 100644 --- a/modules/mongodb/config_schema.json +++ b/modules/mongodb/config_schema.json @@ -1,23 +1,30 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/mongodb job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mongodb job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "uri": { + "type": "string" + }, + "timeout": { + "type": "number" + }, + "databases": { + "type": "string" + } }, - "uri": { - "type": "string" - }, - "timeout": { - "type": "number" - }, - "databases": { - "type": "string" - } + "required": [ + "name", + "uri" + ] }, - "required": [ - "name", - "uri" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/mongodb/mongodb.go b/modules/mongodb/mongodb.go index 522acbaa0..4b54dc7c5 100644 --- a/modules/mongodb/mongodb.go +++ b/modules/mongodb/mongodb.go @@ -4,6 +4,7 @@ package mongo import ( _ "embed" + "errors" "sync" "time" @@ -68,22 +69,34 @@ type Mongo struct { shards map[string]bool } -func (m *Mongo) Init() bool { +func (m *Mongo) Configuration() any { + return m.Config +} + +func (m *Mongo) Init() error { if err := m.verifyConfig(); err != nil { m.Errorf("config validation: %v", err) - return false + return err } if err := m.initDatabaseSelector(); err != nil { m.Errorf("init database selector: %v", err) - return false + return err } - return true + return nil } -func (m *Mongo) Check() bool { - return len(m.Collect()) > 0 +func (m *Mongo) Check() error { + mx, err := m.collect() + if err != nil { + m.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (m *Mongo) Charts() *module.Charts { diff --git a/modules/mongodb/mongodb_test.go b/modules/mongodb/mongodb_test.go index 37da851ed..766ab8ee5 100644 --- a/modules/mongodb/mongodb_test.go +++ b/modules/mongodb/mongodb_test.go @@ -65,9 +65,9 @@ func TestMongo_Init(t *testing.T) { mongo.Config = test.config if test.wantFail { - assert.False(t, mongo.Init()) + assert.Error(t, mongo.Init()) } else { - assert.True(t, mongo.Init()) + assert.NoError(t, mongo.Init()) } }) } @@ -139,12 +139,12 @@ func TestMongo_Check(t *testing.T) { defer mongo.Cleanup() mongo.conn = test.prepare() - require.True(t, mongo.Init()) + require.NoError(t, mongo.Init()) if test.wantFail { - assert.False(t, mongo.Check()) + assert.Error(t, mongo.Check()) } else { - 
assert.True(t, mongo.Check()) + assert.NoError(t, mongo.Check()) } }) } @@ -590,7 +590,7 @@ func TestMongo_Collect(t *testing.T) { defer mongo.Cleanup() mongo.conn = test.prepare() - require.True(t, mongo.Init()) + require.NoError(t, mongo.Init()) mx := mongo.Collect() diff --git a/modules/mysql/collect.go b/modules/mysql/collect.go index 3ff0882ad..796ca22ff 100644 --- a/modules/mysql/collect.go +++ b/modules/mysql/collect.go @@ -97,7 +97,7 @@ func (m *MySQL) openConnection() error { db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { @@ -145,7 +145,7 @@ func hasTableOpenCacheOverflowsMetrics(collected map[string]int64) bool { } func (m *MySQL) collectQuery(query string, assign func(column, value string, lineEnd bool)) (duration int64, err error) { - ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), m.Timeout.Duration()) defer cancel() s := time.Now() diff --git a/modules/mysql/config_schema.json b/modules/mysql/config_schema.json index 1db919824..691b46f19 100644 --- a/modules/mysql/config_schema.json +++ b/modules/mysql/config_schema.json @@ -1,29 +1,36 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/mysql job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/mysql job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "update_every": { + "type": "integer" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "dsn": { - "type": "string" - }, - "my.cnf": { - "type": "string" - }, - "update_every": { - "type": "integer" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/mysql/mysql.go b/modules/mysql/mysql.go index c7016098f..90b583491 100644 --- a/modules/mysql/mysql.go +++ b/modules/mysql/mysql.go @@ -5,6 +5,7 @@ package mysql import ( "database/sql" _ "embed" + "errors" "strings" "sync" "time" @@ -31,7 +32,7 @@ func New() *MySQL { return &MySQL{ Config: Config{ DSN: "root@tcp(localhost:3306)/", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, charts: baseCharts.Copy(), @@ -92,36 +93,49 @@ type MySQL struct { varPerformanceSchema string } -func (m *MySQL) Init() bool { +func (m *MySQL) Configuration() any { + return m.Config +} + +func (m *MySQL) Init() error { if m.MyCNF != "" { dsn, err := dsnFromFile(m.MyCNF) if err != nil { m.Error(err) - return false + return err } m.DSN = dsn } if m.DSN == "" { - m.Error("DSN not set") - return false + m.Error("dsn not set") + return errors.New("dsn not set") } cfg, err := mysql.ParseDSN(m.DSN) if err != nil { m.Errorf("error on parsing DSN: %v", err) - return false + return err } cfg.Passwd = strings.Repeat("*", len(cfg.Passwd)) m.safeDSN = cfg.FormatDSN() m.Debugf("using DSN [%s]", m.DSN) - return true + + return nil } -func (m *MySQL) Check() bool { - return len(m.Collect()) > 0 +func (m *MySQL) Check() error { + mx, err := m.collect() + if err != nil 
{ + m.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (m *MySQL) Charts() *module.Charts { diff --git a/modules/mysql/mysql_test.go b/modules/mysql/mysql_test.go index 283b13770..5f633b3a3 100644 --- a/modules/mysql/mysql_test.go +++ b/modules/mysql/mysql_test.go @@ -113,9 +113,9 @@ func TestMySQL_Init(t *testing.T) { mySQL.Config = test.config if test.wantFail { - assert.False(t, mySQL.Init()) + assert.Error(t, mySQL.Init()) } else { - assert.True(t, mySQL.Init()) + assert.NoError(t, mySQL.Init()) } }) } @@ -235,14 +235,14 @@ func TestMySQL_Check(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, my.Check()) + assert.Error(t, my.Check()) } else { - assert.True(t, my.Check()) + assert.NoError(t, my.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -1607,7 +1607,7 @@ func TestMySQL_Collect(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/nginx/config_schema.json b/modules/nginx/config_schema.json index 58a6865da..ff93738e9 100644 --- a/modules/nginx/config_schema.json +++ b/modules/nginx/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nginx job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "NGINX collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - 
"not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the NGINX status page to monitor.", + "type": "string", + "default": "http://127.0.0.1/stub_status" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/nginx/nginx.go b/modules/nginx/nginx.go index 9acf1e72b..98e7da727 100644 --- a/modules/nginx/nginx.go +++ b/modules/nginx/nginx.go @@ -4,6 +4,7 @@ package nginx import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,74 +22,76 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/stub_status" - defaultHTTPTimeout = time.Second -) - -// New creates Nginx with default values. func New() *Nginx { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Nginx{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/stub_status", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 1), + }, }, - }, - } - - return &Nginx{Config: config} + }} } -// Config is the Nginx module configuration. type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } -// Nginx nginx module. type Nginx struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` apiClient *apiClient } -// Cleanup makes cleanup. -func (Nginx) Cleanup() {} +func (n *Nginx) Configuration() any { + return n.Config +} -// Init makes initialization. -func (n *Nginx) Init() bool { +func (n *Nginx) Init() error { if n.URL == "" { n.Error("URL not set") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(n.Client) if err != nil { n.Error(err) - return false + return err } n.apiClient = newAPIClient(client, n.Request) n.Debugf("using URL %s", n.URL) - n.Debugf("using timeout: %s", n.Timeout.Duration) + n.Debugf("using timeout: %s", n.Timeout) - return true + return nil } -// Check makes check. -func (n *Nginx) Check() bool { return len(n.Collect()) > 0 } +func (n *Nginx) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") -// Charts creates Charts. -func (Nginx) Charts() *Charts { return charts.Copy() } + } + return nil +} + +func (n *Nginx) Charts() *Charts { + return charts.Copy() +} -// Collect collects metrics. 
func (n *Nginx) Collect() map[string]int64 { mx, err := n.collect() - if err != nil { n.Error(err) return nil @@ -96,3 +99,9 @@ func (n *Nginx) Collect() map[string]int64 { return mx } + +func (n *Nginx) Cleanup() { + if n.apiClient != nil && n.apiClient.httpClient != nil { + n.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/nginx/nginx_test.go b/modules/nginx/nginx_test.go index ef115482e..b01884c9e 100644 --- a/modules/nginx/nginx_test.go +++ b/modules/nginx/nginx_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -18,20 +17,14 @@ var ( testTengineStatusData, _ = os.ReadFile("testdata/tengine-status.txt") ) -func TestNginx_Cleanup(t *testing.T) { New().Cleanup() } - -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestNginx_Cleanup(t *testing.T) { + New().Cleanup() } func TestNginx_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -45,19 +38,21 @@ func TestNginx_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestNginx_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } -func TestNginx_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } +func TestNginx_Charts(t *testing.T) { + assert.NotNil(t, New().Charts()) +} func TestNginx_Collect(t *testing.T) { ts := httptest.NewServer( @@ -69,8 +64,8 @@ func TestNginx_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "accepts": 36, @@ -95,8 +90,8 @@ func TestNginx_CollectTengine(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "accepts": 1140, @@ -122,8 +117,8 @@ func TestNginx_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestNginx_404(t *testing.T) { @@ -136,6 +131,6 @@ func TestNginx_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/nginxplus/config_schema.json b/modules/nginxplus/config_schema.json index c1457d2d7..844ad2972 100644 --- a/modules/nginxplus/config_schema.json +++ b/modules/nginxplus/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nginxplus job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - 
"proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "NGINX Plus collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The base URL of the NGINX Plus webserver.", + "type": "string", + "default": "http://127.0.0.1" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP 
headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/nginxplus/nginxplus.go b/modules/nginxplus/nginxplus.go index ba82242f8..060863923 100644 --- a/modules/nginxplus/nginxplus.go +++ b/modules/nginxplus/nginxplus.go @@ -4,6 +4,7 @@ package nginxplus import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *NginxPlus { URL: "http://127.0.0.1", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 1}, + Timeout: web.Duration(time.Second * 1), }, }, }, @@ -40,12 +41,14 @@ func New() *NginxPlus { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type NginxPlus struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts @@ -72,24 +75,36 @@ type NginxPlus struct { cache *cache } -func (n *NginxPlus) Init() bool { +func (n *NginxPlus) Configuration() any { + return n.Config +} + +func (n *NginxPlus) Init() error { if n.URL == "" { n.Error("config validation: 'url' can not be empty'") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(n.Client) if err != nil { n.Errorf("init HTTP client: %v", err) - return false + return err } n.httpClient = client - return true + return nil } -func (n *NginxPlus) Check() bool { - return len(n.Collect()) > 0 +func (n *NginxPlus) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (n *NginxPlus) Charts() *module.Charts { diff --git a/modules/nginxplus/nginxplus_test.go b/modules/nginxplus/nginxplus_test.go index 7bbe89557..fbc4d671e 100644 --- a/modules/nginxplus/nginxplus_test.go +++ b/modules/nginxplus/nginxplus_test.go @@ -80,9 +80,9 @@ func TestNginxPlus_Init(t *testing.T) { nginx.Config = test.config if test.wantFail { - assert.False(t, nginx.Init()) + assert.Error(t, nginx.Init()) } else { - assert.True(t, nginx.Init()) + assert.NoError(t, nginx.Init()) } }) } @@ -117,9 +117,9 @@ func TestNginxPlus_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, nginx.Check()) + assert.Error(t, nginx.Check()) } else 
{ - assert.True(t, nginx.Check()) + assert.NoError(t, nginx.Check()) } }) } @@ -500,7 +500,7 @@ func caseAPI8AllRequestsOK(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -542,7 +542,7 @@ func caseAPI8AllRequestsExceptStreamOK(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -555,7 +555,7 @@ func caseInvalidDataResponse(t *testing.T) (*NginxPlus, func()) { })) nginx := New() nginx.URL = srv.URL - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, srv.Close } @@ -564,7 +564,7 @@ func caseConnectionRefused(t *testing.T) (*NginxPlus, func()) { t.Helper() nginx := New() nginx.URL = "http://127.0.0.1:65001" - require.True(t, nginx.Init()) + require.NoError(t, nginx.Init()) return nginx, func() {} } diff --git a/modules/nginxvts/config_schema.json b/modules/nginxvts/config_schema.json index a4b44429f..e34bb9553 100644 --- a/modules/nginxvts/config_schema.json +++ b/modules/nginxvts/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nginxvts job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "NGINX VTS module collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the NGINX VTS module status page.", + 
"type": "string", + "default": "http://localhost/status/format/json" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/nginxvts/init.go b/modules/nginxvts/init.go index 7ebf049ab..59896a8ef 100644 --- a/modules/nginxvts/init.go +++ b/modules/nginxvts/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (vts NginxVTS) validateConfig() error { +func (vts *NginxVTS) validateConfig() error { if vts.URL == "" { return errors.New("URL not set") } @@ -21,11 +21,11 @@ func (vts NginxVTS) validateConfig() error { return nil } -func (vts NginxVTS) initHTTPClient() (*http.Client, error) { +func (vts *NginxVTS) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(vts.Client) } -func (vts NginxVTS) initCharts() (*module.Charts, error) { +func (vts *NginxVTS) initCharts() (*module.Charts, error) { charts := module.Charts{} if err := charts.Add(*mainCharts.Copy()...); err != nil { diff --git a/modules/nginxvts/nginxvts.go b/modules/nginxvts/nginxvts.go index 1cc3a6014..322f7b997 100644 --- a/modules/nginxvts/nginxvts.go +++ b/modules/nginxvts/nginxvts.go @@ -4,6 +4,7 @@ package nginxvts import ( _ "embed" + "errors" "net/http" "time" @@ -32,7 +33,7 @@ func New() *NginxVTS { URL: "http://localhost/status/format/json", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -40,7 +41,9 @@ func New() *NginxVTS { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type NginxVTS struct { @@ -51,6 +54,10 @@ type NginxVTS struct { charts *module.Charts } +func (vts *NginxVTS) Configuration() any { + return vts.Config +} + func (vts *NginxVTS) Cleanup() { if vts.httpClient == nil { return @@ -58,11 +65,11 @@ func (vts *NginxVTS) Cleanup() { vts.httpClient.CloseIdleConnections() } -func (vts *NginxVTS) Init() bool { +func (vts *NginxVTS) Init() error { err := vts.validateConfig() if err != nil { vts.Errorf("check configuration: %v", err) - return false + return err } httpClient, err := vts.initHTTPClient() @@ -74,15 +81,23 @@ func (vts *NginxVTS) Init() bool { charts, err := vts.initCharts() if err != nil { vts.Errorf("init charts: %v", err) - return false + return err } vts.charts = charts - return true + return nil } -func (vts *NginxVTS) Check() bool { - return len(vts.Collect()) > 0 +func (vts *NginxVTS) Check() error { + mx, err := vts.collect() + if err != nil { + vts.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (vts *NginxVTS) Charts() *module.Charts { diff --git a/modules/nginxvts/nginxvts_test.go b/modules/nginxvts/nginxvts_test.go index ef204ad75..6333b1580 100644 --- a/modules/nginxvts/nginxvts_test.go +++ b/modules/nginxvts/nginxvts_test.go @@ -70,9 +70,9 @@ func TestNginxVTS_Init(t *testing.T) { es.Config = test.config if test.wantFail { - assert.False(t, es.Init()) + assert.Error(t, es.Init()) } else { - assert.True(t, es.Init()) + assert.NoError(t, es.Init()) assert.Equal(t, test.wantNumOfCharts, len(*es.Charts())) } }) @@ -96,9 +96,9 @@ func TestNginxVTS_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, vts.Check()) + assert.Error(t, vts.Check()) } else { - assert.True(t, vts.Check()) + assert.NoError(t, vts.Check()) } }) } @@ -197,7 +197,7 @@ func 
prepareNginxVTS(t *testing.T, createNginxVTS func() *NginxVTS) (vts *NginxV srv := prepareNginxVTSEndpoint() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -214,7 +214,7 @@ func prepareNginxVTSInvalidData(t *testing.T) (*NginxVTS, func()) { })) vts := New() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -227,7 +227,7 @@ func prepareNginxVTS404(t *testing.T) (*NginxVTS, func()) { })) vts := New() vts.URL = srv.URL - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, srv.Close } @@ -236,7 +236,7 @@ func prepareNginxVTSConnectionRefused(t *testing.T) (*NginxVTS, func()) { t.Helper() vts := New() vts.URL = "http://127.0.0.1:18080" - require.True(t, vts.Init()) + require.NoError(t, vts.Init()) return vts, func() {} } diff --git a/modules/ntpd/client.go b/modules/ntpd/client.go index 5164c80e8..8e111cd76 100644 --- a/modules/ntpd/client.go +++ b/modules/ntpd/client.go @@ -10,14 +10,14 @@ import ( ) func newNTPClient(c Config) (ntpConn, error) { - conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration) + conn, err := net.DialTimeout("udp", c.Address, c.Timeout.Duration()) if err != nil { return nil, err } client := &ntpClient{ conn: conn, - timeout: c.Timeout.Duration, + timeout: c.Timeout.Duration(), client: &control.NTPClient{Connection: conn}, } diff --git a/modules/ntpd/config_schema.json b/modules/ntpd/config_schema.json index ef360a7f9..5d54d35f9 100644 --- a/modules/ntpd/config_schema.json +++ b/modules/ntpd/config_schema.json @@ -1,26 +1,33 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/ntpd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/ntpd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_peers": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_peers": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/ntpd/ntpd.go b/modules/ntpd/ntpd.go index 8bbc0ba4f..83be1c090 100644 --- a/modules/ntpd/ntpd.go +++ b/modules/ntpd/ntpd.go @@ -4,6 +4,8 @@ package ntpd import ( _ "embed" + "errors" + "fmt" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -25,7 +27,7 @@ func New() *NTPd { return &NTPd{ Config: Config{ Address: "127.0.0.1:123", - Timeout: web.Duration{Duration: time.Second * 3}, + Timeout: web.Duration(time.Second * 3), CollectPeers: false, }, charts: systemCharts.Copy(), @@ -65,26 +67,38 @@ type ( } ) -func (n *NTPd) Init() bool { +func (n *NTPd) Configuration() any { + return n.Config +} + +func (n *NTPd) Init() error { if n.Address == "" { n.Error("config validation: 'address' can not be empty") - return false + return errors.New("address not set") } txt := "0.0.0.0 127.0.0.0/8" r, err := iprange.ParseRanges(txt) if err != nil { - n.Errorf("error on parse ip range '%s': %v", txt, err) - return false + n.Errorf("error on parsing ip range '%s': %v", txt, err) + return fmt.Errorf("error on parsing ip range '%s': %v", txt, err) } n.peerIPAddrFilter = r - return true + 
return nil } -func (n *NTPd) Check() bool { - return len(n.Collect()) > 0 +func (n *NTPd) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (n *NTPd) Charts() *module.Charts { diff --git a/modules/ntpd/ntpd_test.go b/modules/ntpd/ntpd_test.go index 481d2d7e9..745e7341d 100644 --- a/modules/ntpd/ntpd_test.go +++ b/modules/ntpd/ntpd_test.go @@ -33,9 +33,9 @@ func TestNTPd_Init(t *testing.T) { n.Config = test.config if test.wantFail { - assert.False(t, n.Init()) + assert.Error(t, n.Init()) } else { - assert.True(t, n.Init()) + assert.NoError(t, n.Init()) } }) } @@ -56,15 +56,15 @@ func TestNTPd_Cleanup(t *testing.T) { }, "after Init": { wantClose: false, - prepare: func(n *NTPd) { n.Init() }, + prepare: func(n *NTPd) { _ = n.Init() }, }, "after Check": { wantClose: true, - prepare: func(n *NTPd) { n.Init(); n.Check() }, + prepare: func(n *NTPd) { _ = n.Init(); _ = n.Check() }, }, "after Collect": { wantClose: true, - prepare: func(n *NTPd) { n.Init(); n.Collect() }, + prepare: func(n *NTPd) { _ = n.Init(); n.Collect() }, }, } @@ -116,12 +116,12 @@ func TestNTPd_Check(t *testing.T) { t.Run(name, func(t *testing.T) { n := test.prepare() - require.True(t, n.Init()) + require.NoError(t, n.Init()) if test.wantFail { - assert.False(t, n.Check()) + assert.Error(t, n.Check()) } else { - assert.True(t, n.Check()) + assert.NoError(t, n.Check()) } }) } @@ -237,7 +237,7 @@ func TestNTPd_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { n := test.prepare() - require.True(t, n.Init()) + require.NoError(t, n.Init()) _ = n.Check() mx := n.Collect() diff --git a/modules/nvidia_smi/config_schema.json b/modules/nvidia_smi/config_schema.json index fc5b38e08..6ba679261 100644 --- a/modules/nvidia_smi/config_schema.json +++ b/modules/nvidia_smi/config_schema.json @@ -1,25 +1,32 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nvidia_smi job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nvidia_smi job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "binary_path": { + "type": "string" + }, + "use_csv_format": { + "type": "boolean" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "binary_path": { - "type": "string" - }, - "use_csv_format": { - "type": "boolean" - } + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/nvidia_smi/exec.go b/modules/nvidia_smi/exec.go index 93e23057b..c4f1e3f2c 100644 --- a/modules/nvidia_smi/exec.go +++ b/modules/nvidia_smi/exec.go @@ -16,7 +16,7 @@ import ( func newNvidiaSMIExec(path string, cfg Config, log *logger.Logger) (*nvidiaSMIExec, error) { return &nvidiaSMIExec{ binPath: path, - timeout: cfg.Timeout.Duration, + timeout: cfg.Timeout.Duration(), Logger: log, }, nil } diff --git a/modules/nvidia_smi/nvidia_smi.go b/modules/nvidia_smi/nvidia_smi.go index 1370b4335..4ad9cce56 100644 --- a/modules/nvidia_smi/nvidia_smi.go +++ b/modules/nvidia_smi/nvidia_smi.go @@ -4,6 +4,7 @@ package nvidia_smi import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func init() { func New() *NvidiaSMI { return &NvidiaSMI{ Config: Config{ - 
Timeout: web.Duration{Duration: time.Second * 10}, + Timeout: web.Duration(time.Second * 10), UseCSVFormat: true, }, binName: "nvidia-smi", @@ -66,21 +67,33 @@ type ( } ) -func (nv *NvidiaSMI) Init() bool { +func (nv *NvidiaSMI) Configuration() any { + return nv.Config +} + +func (nv *NvidiaSMI) Init() error { if nv.exec == nil { smi, err := nv.initNvidiaSMIExec() if err != nil { nv.Error(err) - return false + return err } nv.exec = smi } - return true + return nil } -func (nv *NvidiaSMI) Check() bool { - return len(nv.Collect()) > 0 +func (nv *NvidiaSMI) Check() error { + mx, err := nv.collect() + if err != nil { + nv.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (nv *NvidiaSMI) Charts() *module.Charts { diff --git a/modules/nvidia_smi/nvidia_smi_test.go b/modules/nvidia_smi/nvidia_smi_test.go index cdd7742fd..bb83642f6 100644 --- a/modules/nvidia_smi/nvidia_smi_test.go +++ b/modules/nvidia_smi/nvidia_smi_test.go @@ -60,9 +60,9 @@ func TestNvidiaSMI_Init(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Init()) + assert.Error(t, nv.Init()) } else { - assert.True(t, nv.Init()) + assert.NoError(t, nv.Init()) } }) } @@ -118,9 +118,9 @@ func TestNvidiaSMI_Check(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Check()) + assert.Error(t, nv.Check()) } else { - assert.True(t, nv.Check()) + assert.NoError(t, nv.Check()) } }) } diff --git a/modules/nvme/config_schema.json b/modules/nvme/config_schema.json index fcd2869d6..3d9e0c901 100644 --- a/modules/nvme/config_schema.json +++ b/modules/nvme/config_schema.json @@ -1,22 +1,29 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/nvme job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/nvme job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "binary_path": { + "type": "string" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "binary_path": { - "type": "string" - } + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/nvme/init.go b/modules/nvme/init.go index 70988031c..44ff90f4e 100644 --- a/modules/nvme/init.go +++ b/modules/nvme/init.go @@ -29,7 +29,7 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { n.Debug("using ndsudo") return &nvmeCLIExec{ ndsudoPath: ndsudoPath, - timeout: n.Timeout.Duration, + timeout: n.Timeout.Duration(), }, nil } } @@ -51,14 +51,14 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { } if sudoPath != "" { - ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration) + ctx1, cancel1 := context.WithTimeout(context.Background(), n.Timeout.Duration()) defer cancel1() if _, err := exec.CommandContext(ctx1, sudoPath, "-n", "-v").Output(); err != nil { return nil, fmt.Errorf("can not run sudo on this host: %v", err) } - ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration) + ctx2, cancel2 := context.WithTimeout(context.Background(), n.Timeout.Duration()) defer cancel2() if _, err := exec.CommandContext(ctx2, sudoPath, "-n", "-l", nvmePath).Output(); err != nil { @@ -69,6 +69,6 @@ func (n *NVMe) initNVMeCLIExec() (nvmeCLI, error) { return &nvmeCLIExec{ sudoPath: sudoPath, nvmePath: 
nvmePath, - timeout: n.Timeout.Duration, + timeout: n.Timeout.Duration(), }, nil } diff --git a/modules/nvme/nvme.go b/modules/nvme/nvme.go index d8f86869a..7d1d59bc2 100644 --- a/modules/nvme/nvme.go +++ b/modules/nvme/nvme.go @@ -4,6 +4,7 @@ package nvme import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func New() *NVMe { return &NVMe{ Config: Config{ BinaryPath: "nvme", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: &module.Charts{}, devicePaths: make(map[string]bool), @@ -61,24 +62,36 @@ type ( } ) -func (n *NVMe) Init() bool { +func (n *NVMe) Configuration() any { + return n.Config +} + +func (n *NVMe) Init() error { if err := n.validateConfig(); err != nil { n.Errorf("config validation: %v", err) - return false + return err } v, err := n.initNVMeCLIExec() if err != nil { n.Errorf("init nvme-cli exec: %v", err) - return false + return err } n.exec = v - return true + return nil } -func (n *NVMe) Check() bool { - return len(n.Collect()) > 0 +func (n *NVMe) Check() error { + mx, err := n.collect() + if err != nil { + n.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (n *NVMe) Charts() *module.Charts { diff --git a/modules/nvme/nvme_test.go b/modules/nvme/nvme_test.go index 26c55182b..7483f9d1a 100644 --- a/modules/nvme/nvme_test.go +++ b/modules/nvme/nvme_test.go @@ -58,9 +58,9 @@ func TestNVMe_Init(t *testing.T) { test.prepare(nv) if test.wantFail { - assert.False(t, nv.Init()) + assert.Error(t, nv.Init()) } else { - assert.True(t, nv.Init()) + assert.NoError(t, nv.Init()) } }) } @@ -104,9 +104,9 @@ func TestNVMe_Check(t *testing.T) { test.prepare(n) if test.wantFail { - assert.False(t, n.Check()) + assert.Error(t, n.Check()) } else { - assert.True(t, n.Check()) + assert.NoError(t, n.Check()) } }) } diff --git a/modules/openvpn/config_schema.json b/modules/openvpn/config_schema.json index db6442db9..ad776bf23 100644 --- a/modules/openvpn/config_schema.json +++ b/modules/openvpn/config_schema.json @@ -1,52 +1,59 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/openvpn job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "connect_timeout": { - "type": [ - "string", - "integer" - ] - }, - "read_timeout": { - "type": [ - "string", - "integer" - ] - }, - "write_timeout": { - "type": [ - "string", - "integer" - ] - }, - "per_user_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "connect_timeout": { + "type": [ + "string", + "integer" + ] + }, + "read_timeout": { + "type": [ + "string", + "integer" + ] + }, + "write_timeout": { + "type": [ + "string", + "integer" + ] + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } } - } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true 
+ } + } } diff --git a/modules/openvpn/init.go b/modules/openvpn/init.go new file mode 100644 index 000000000..843981e48 --- /dev/null +++ b/modules/openvpn/init.go @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package openvpn + +import ( + "github.com/netdata/go.d.plugin/modules/openvpn/client" + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/socket" +) + +func (o *OpenVPN) validateConfig() error { + return nil +} + +func (o *OpenVPN) initPerUserMatcher() (matcher.Matcher, error) { + if o.PerUserStats.Empty() { + return nil, nil + } + return o.PerUserStats.Parse() +} + +func (o *OpenVPN) initClient() *client.Client { + config := socket.Config{ + Address: o.Address, + ConnectTimeout: o.ConnectTimeout.Duration(), + ReadTimeout: o.ReadTimeout.Duration(), + WriteTimeout: o.WriteTimeout.Duration(), + } + return &client.Client{Client: socket.New(config)} +} diff --git a/modules/openvpn/openvpn.go b/modules/openvpn/openvpn.go index 0a6ccbb81..6c161a3bd 100644 --- a/modules/openvpn/openvpn.go +++ b/modules/openvpn/openvpn.go @@ -6,19 +6,11 @@ import ( _ "embed" "time" + "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/modules/openvpn/client" "github.com/netdata/go.d.plugin/pkg/matcher" "github.com/netdata/go.d.plugin/pkg/socket" "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -const ( - defaultAddress = "127.0.0.1:7505" - defaultConnectTimeout = time.Second * 2 - defaultReadTimeout = time.Second * 2 - defaultWriteTimeout = time.Second * 2 ) //go:embed "config_schema.json" @@ -37,10 +29,10 @@ func init() { // New creates OpenVPN with default values. func New() *OpenVPN { config := Config{ - Address: defaultAddress, - ConnectTimeout: web.Duration{Duration: defaultConnectTimeout}, - ReadTimeout: web.Duration{Duration: defaultReadTimeout}, - WriteTimeout: web.Duration{Duration: defaultWriteTimeout}, + Address: "127.0.0.1:7505", + ConnectTimeout: web.Duration(time.Second * 2), + ReadTimeout: web.Duration(time.Second * 2), + WriteTimeout: web.Duration(time.Second * 2), } return &OpenVPN{ Config: config, @@ -58,61 +50,55 @@ type Config struct { PerUserStats matcher.SimpleExpr `yaml:"per_user_stats"` } -type openVPNClient interface { - socket.Client - Version() (*client.Version, error) - LoadStats() (*client.LoadStats, error) - Users() (client.Users, error) -} - // OpenVPN OpenVPN module. -type OpenVPN struct { - module.Base - Config `yaml:",inline"` - client openVPNClient - charts *Charts - collectedUsers map[string]bool - perUserMatcher matcher.Matcher -} - -// Cleanup makes cleanup. -func (o *OpenVPN) Cleanup() { - if o.client == nil { - return +type ( + OpenVPN struct { + module.Base + Config `yaml:",inline"` + client openVPNClient + charts *Charts + collectedUsers map[string]bool + perUserMatcher matcher.Matcher } - _ = o.client.Disconnect() + openVPNClient interface { + socket.Client + Version() (*client.Version, error) + LoadStats() (*client.LoadStats, error) + Users() (client.Users, error) + } +) + +func (o *OpenVPN) Configuration() any { + return o.Config } // Init makes initialization. 
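// [Editorial sketch, not part of the patch] Every collector touched by this
// change-set is migrated from bool-returning lifecycle methods to the
// error-returning shape below. The interface and helper names here are
// illustrative assumptions, not code taken from the diff; only the method set
// (Configuration/Init/Check/Collect/Cleanup) and the "no metrics collected"
// check mirror what the patch applies to openvpn and the other modules.
package sketch

import "errors"

// errorBasedModule mirrors the method set the patch converts collectors to.
type errorBasedModule interface {
	Configuration() any        // expose the collector's config (new in this patch)
	Init() error               // validate config, build clients; non-nil error aborts the job
	Check() error              // one trial collection; non-nil error fails the check
	Collect() map[string]int64
	Cleanup()
}

// checkPattern is the Check() body used throughout the converted modules.
func checkPattern(collect func() (map[string]int64, error)) error {
	mx, err := collect()
	if err != nil {
		return err
	}
	if len(mx) == 0 {
		return errors.New("no metrics collected")
	}
	return nil
}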
-func (o *OpenVPN) Init() bool { - if !o.PerUserStats.Empty() { - m, err := o.PerUserStats.Parse() - if err != nil { - o.Errorf("error on creating per user stats matcher : %v", err) - return false - } - o.perUserMatcher = matcher.WithCache(m) +func (o *OpenVPN) Init() error { + if err := o.validateConfig(); err != nil { + o.Error(err) + return err } - config := socket.Config{ - Address: o.Address, - ConnectTimeout: o.ConnectTimeout.Duration, - ReadTimeout: o.ReadTimeout.Duration, - WriteTimeout: o.WriteTimeout.Duration, + m, err := o.initPerUserMatcher() + if err != nil { + o.Error(err) + return err } - o.client = &client.Client{Client: socket.New(config)} + o.perUserMatcher = m + + o.client = o.initClient() o.Infof("using address: %s, connect timeout: %s, read timeout: %s, write timeout: %s", - o.Address, o.ConnectTimeout.Duration, o.ReadTimeout.Duration, o.WriteTimeout.Duration) + o.Address, o.ConnectTimeout, o.ReadTimeout, o.WriteTimeout) - return true + return nil } // Check makes check. -func (o *OpenVPN) Check() bool { +func (o *OpenVPN) Check() error { if err := o.client.Connect(); err != nil { o.Error(err) - return false + return err } defer func() { _ = o.client.Disconnect() }() @@ -120,11 +106,12 @@ func (o *OpenVPN) Check() bool { if err != nil { o.Error(err) o.Cleanup() - return false + return err } o.Infof("connected to OpenVPN v%d.%d.%d, Management v%d", ver.Major, ver.Minor, ver.Patch, ver.Management) - return true + + return nil } // Charts creates Charts. @@ -142,3 +129,11 @@ func (o *OpenVPN) Collect() map[string]int64 { } return mx } + +// Cleanup makes cleanup. +func (o *OpenVPN) Cleanup() { + if o.client == nil { + return + } + _ = o.client.Disconnect() +} diff --git a/modules/openvpn/openvpn_test.go b/modules/openvpn/openvpn_test.go index 02fa1a602..8981f21aa 100644 --- a/modules/openvpn/openvpn_test.go +++ b/modules/openvpn/openvpn_test.go @@ -5,7 +5,6 @@ package openvpn import ( "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/modules/openvpn/client" "github.com/netdata/go.d.plugin/pkg/matcher" "github.com/netdata/go.d.plugin/pkg/socket" @@ -36,28 +35,16 @@ var ( }} ) -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultAddress, job.Address) - assert.Equal(t, defaultConnectTimeout, job.ConnectTimeout.Duration) - assert.Equal(t, defaultReadTimeout, job.ReadTimeout.Duration) - assert.Equal(t, defaultWriteTimeout, job.WriteTimeout.Duration) - assert.NotNil(t, job.charts) - assert.NotNil(t, job.collectedUsers) -} - func TestOpenVPN_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestOpenVPN_Check(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) } func TestOpenVPN_Charts(t *testing.T) { @@ -68,19 +55,19 @@ func TestOpenVPN_Cleanup(t *testing.T) { job := New() assert.NotPanics(t, job.Cleanup) - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) job.Cleanup() } func TestOpenVPN_Collect(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.perUserMatcher = matcher.TRUE() job.client = prepareMockOpenVPNClient() - require.True(t, job.Check()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 1, @@ 
-99,12 +86,12 @@ func TestOpenVPN_Collect(t *testing.T) { func TestOpenVPN_Collect_UNDEFUsername(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.perUserMatcher = matcher.TRUE() cl := prepareMockOpenVPNClient() cl.users = testUsersUNDEF job.client = cl - require.True(t, job.Check()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 1, diff --git a/modules/openvpn_status_log/config_schema.json b/modules/openvpn_status_log/config_schema.json index 904da56c0..118ab4312 100644 --- a/modules/openvpn_status_log/config_schema.json +++ b/modules/openvpn_status_log/config_schema.json @@ -1,34 +1,41 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/openvpn_status_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "log_path": { - "type": "string" - }, - "per_user_stats": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/openvpn_status_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "log_path": { + "type": "string" + }, + "per_user_stats": { + "type": "object", + "properties": { + "includes": { + "type": "array", + "items": { + "type": "string" + } + }, + "excludes": { + "type": "array", + "items": { + "type": "string" + } } } } - } + }, + "required": [ + "name", + "log_path" + ] }, - "required": [ - "name", - "log_path" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/openvpn_status_log/init.go b/modules/openvpn_status_log/init.go index 9bd34a510..5e1521e5e 100644 --- a/modules/openvpn_status_log/init.go +++ b/modules/openvpn_status_log/init.go @@ -7,14 +7,14 @@ import ( "github.com/netdata/go.d.plugin/pkg/matcher" ) -func (o OpenVPNStatusLog) validateConfig() error { +func (o *OpenVPNStatusLog) validateConfig() error { if o.LogPath == "" { return errors.New("empty 'log_path'") } return nil } -func (o OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) { +func (o *OpenVPNStatusLog) initPerUserStatsMatcher() (matcher.Matcher, error) { if o.PerUserStats.Empty() { return nil, nil } diff --git a/modules/openvpn_status_log/openvpn.go b/modules/openvpn_status_log/openvpn.go index dc9e7340b..b44969918 100644 --- a/modules/openvpn_status_log/openvpn.go +++ b/modules/openvpn_status_log/openvpn.go @@ -4,6 +4,7 @@ package openvpn_status_log import ( _ "embed" + "errors" "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/matcher" @@ -20,11 +21,10 @@ func init() { } func New() *OpenVPNStatusLog { - config := Config{ - LogPath: "/var/log/openvpn/status.log", - } return &OpenVPNStatusLog{ - Config: config, + Config: Config{ + LogPath: "/var/log/openvpn/status.log", + }, charts: charts.Copy(), collectedUsers: make(map[string]bool), } @@ -42,34 +42,46 @@ type OpenVPNStatusLog struct { charts *module.Charts - collectedUsers map[string]bool perUserMatcher matcher.Matcher + + collectedUsers map[string]bool } -func (o *OpenVPNStatusLog) Init() bool { +func (o *OpenVPNStatusLog) Configuration() any { + return o.Config +} + +func (o *OpenVPNStatusLog) Init() error { if err := o.validateConfig(); err != nil { o.Errorf("error on validating config: %v", err) - return false + return err } m, err := 
o.initPerUserStatsMatcher() if err != nil { o.Errorf("error on creating 'per_user_stats' matcher: %v", err) - return false + return err } - if m != nil { o.perUserMatcher = m } - return true + return nil } -func (o *OpenVPNStatusLog) Check() bool { - return len(o.Collect()) > 0 +func (o *OpenVPNStatusLog) Check() error { + mx, err := o.collect() + if err != nil { + o.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (o OpenVPNStatusLog) Charts() *module.Charts { +func (o *OpenVPNStatusLog) Charts() *module.Charts { return o.charts } diff --git a/modules/openvpn_status_log/openvpn_test.go b/modules/openvpn_status_log/openvpn_test.go index d54d27824..f6db50e4a 100644 --- a/modules/openvpn_status_log/openvpn_test.go +++ b/modules/openvpn_status_log/openvpn_test.go @@ -49,9 +49,9 @@ func TestOpenVPNStatusLog_Init(t *testing.T) { ovpn.Config = test.config if test.wantFail { - assert.False(t, ovpn.Init()) + assert.Error(t, ovpn.Init()) } else { - assert.True(t, ovpn.Init()) + assert.NoError(t, ovpn.Init()) } }) } @@ -76,12 +76,12 @@ func TestOpenVPNStatusLog_Check(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) if test.wantFail { - assert.False(t, ovpn.Check()) + assert.Error(t, ovpn.Check()) } else { - assert.True(t, ovpn.Check()) + assert.NoError(t, ovpn.Check()) } }) } @@ -114,7 +114,7 @@ func TestOpenVPNStatusLog_Charts(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) _ = ovpn.Check() _ = ovpn.Collect() @@ -240,7 +240,7 @@ func TestOpenVPNStatusLog_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { ovpn := test.prepare() - require.True(t, ovpn.Init()) + require.NoError(t, ovpn.Init()) _ = ovpn.Check() collected := ovpn.Collect() diff --git a/modules/pgbouncer/collect.go b/modules/pgbouncer/collect.go index 40dbddb9f..c0e4bf2da 100644 --- a/modules/pgbouncer/collect.go +++ b/modules/pgbouncer/collect.go @@ -236,7 +236,7 @@ func (p *PgBouncer) queryVersion() (*semver.Version, error) { p.Debugf("executing query: %v", q) var resp string - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := p.db.QueryRowContext(ctx, q).Scan(&resp); err != nil { return nil, err @@ -281,7 +281,7 @@ func (p *PgBouncer) openConnection() error { } func (p *PgBouncer) collectQuery(query string, assign func(column, value string)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := p.db.QueryContext(ctx, query) if err != nil { diff --git a/modules/pgbouncer/config_schema.json b/modules/pgbouncer/config_schema.json index 16cf22ecb..4570c8798 100644 --- a/modules/pgbouncer/config_schema.json +++ b/modules/pgbouncer/config_schema.json @@ -1,23 +1,30 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pgbouncer job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/pgbouncer job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + 
"integer" + ] + } }, - "dsn": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/pgbouncer/pgbouncer.go b/modules/pgbouncer/pgbouncer.go index ebb11327b..a19f8f074 100644 --- a/modules/pgbouncer/pgbouncer.go +++ b/modules/pgbouncer/pgbouncer.go @@ -5,6 +5,7 @@ package pgbouncer import ( "database/sql" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -27,7 +28,7 @@ func init() { func New() *PgBouncer { return &PgBouncer{ Config: Config{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), DSN: "postgres://postgres:postgres@127.0.0.1:6432/pgbouncer", }, charts: globalCharts.Copy(), @@ -59,18 +60,30 @@ type PgBouncer struct { metrics *metrics } -func (p *PgBouncer) Init() bool { +func (p *PgBouncer) Configuration() any { + return p.Config +} + +func (p *PgBouncer) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } - return true + return nil } -func (p *PgBouncer) Check() bool { - return len(p.Collect()) > 0 +func (p *PgBouncer) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *PgBouncer) Charts() *module.Charts { diff --git a/modules/pgbouncer/pgbouncer_test.go b/modules/pgbouncer/pgbouncer_test.go index e1e0695dd..d6b69abb7 100644 --- a/modules/pgbouncer/pgbouncer_test.go +++ b/modules/pgbouncer/pgbouncer_test.go @@ -60,9 +60,9 @@ func TestPgBouncer_Init(t *testing.T) { p.Config = test.config if test.wantFail { - assert.False(t, p.Init()) + assert.Error(t, p.Init()) } else { - assert.True(t, p.Init()) + assert.NoError(t, p.Init()) } }) } @@ -118,14 +118,14 @@ func TestPgBouncer_Check(t *testing.T) { p.db = db defer func() { _ = db.Close() }() - require.True(t, p.Init()) + require.NoError(t, p.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, p.Check()) + assert.Error(t, p.Check()) } else { - assert.True(t, p.Check()) + assert.NoError(t, p.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -283,7 +283,7 @@ func TestPgBouncer_Collect(t *testing.T) { p.db = db defer func() { _ = db.Close() }() - require.True(t, p.Init()) + require.NoError(t, p.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/phpdaemon/config_schema.json b/modules/phpdaemon/config_schema.json index c200d437b..8e5ad48a8 100644 --- a/modules/phpdaemon/config_schema.json +++ b/modules/phpdaemon/config_schema.json @@ -1,59 +1,199 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/phpdaemon job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "phpDaemon collector configuration.", + "type": "object", + "properties": { + "configuration_mode": { + "title": "Configuration", + "type": "string", + "enum": [ + "standard", + "advanced" + ], + "default": "standard" + } }, - 
"proxy_password": { - "type": "string" + "dependencies": { + "configuration_mode": { + "oneOf": [ + { + "properties": { + "configuration_mode": { + "const": "standard" + }, + "url": { + "$ref": "#/definitions/url" + }, + "tls_skip_verify": { + "$ref": "#/definitions/tls_skip_verify" + }, + "update_every": { + "$ref": "#/definitions/update_every" + }, + "timeout": { + "$ref": "#/definitions/timeout" + } + }, + "required": [ + "url" + ] + }, + { + "properties": { + "configuration_mode": { + "const": "advanced" + }, + "url": { + "$ref": "#/definitions/url" + }, + "tls_skip_verify": { + "$ref": "#/definitions/tls_skip_verify" + }, + "update_every": { + "$ref": "#/definitions/update_every" + }, + "timeout": { + "$ref": "#/definitions/timeout" + }, + "username": { + "$ref": "#/definitions/username" + }, + "password": { + "$ref": "#/definitions/password" + }, + "proxy_url": { + "$ref": "#/definitions/proxy_url" + }, + "proxy_username": { + "$ref": "#/definitions/proxy_username" + }, + "proxy_password": { + "$ref": "#/definitions/proxy_password" + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "not_follow_redirects": { + "$ref": "#/definitions/not_follow_redirects" + }, + "tls_ca": { + "$ref": "#/definitions/tls_ca" + }, + "tls_cert": { + "$ref": "#/definitions/tls_cert" + }, + "tls_key": { + "$ref": "#/definitions/tls_key" + } + }, + "required": [ + "url" + ] + } + ] + } }, - "headers": { - "type": "object", - "additionalProperties": { + "definitions": { + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" + }, + "url": { + "title": "URL", + "description": "The URL of the phpDaemon status page.", + "type": "string", + "default": "http://127.0.0.1:8509/FullStatuss" + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "minimum": 0.5, + "default": 1, + "type": "number" + }, + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + }, + 
"tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" + "configuration_mode": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/phpdaemon/init.go b/modules/phpdaemon/init.go new file mode 100644 index 000000000..d96b23011 --- /dev/null +++ b/modules/phpdaemon/init.go @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package phpdaemon + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *PHPDaemon) validateConfig() error { + if p.URL == "" { + return errors.New("url not set") + } + if _, err := web.NewHTTPRequest(p.Request); err != nil { + return err + } + return nil +} + +func (p *PHPDaemon) initClient() (*client, error) { + httpClient, err := web.NewHTTPClient(p.Client) + if err != nil { + return nil, err + } + return newAPIClient(httpClient, p.Request), nil +} diff --git a/modules/phpdaemon/phpdaemon.go b/modules/phpdaemon/phpdaemon.go index 506892cfe..8d3a46589 100644 --- a/modules/phpdaemon/phpdaemon.go +++ b/modules/phpdaemon/phpdaemon.go @@ -4,6 +4,7 @@ package phpdaemon import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,86 +22,82 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1:8509/FullStatus" - defaultHTTPTimeout = time.Second * 2 -) - // New creates PHPDaemon with default values. func New() *PHPDaemon { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &PHPDaemon{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8509/FullStatus", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &PHPDaemon{ - Config: config, charts: charts.Copy(), } } // Config is the PHPDaemon module configuration. type Config struct { - web.HTTP `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` } // PHPDaemon PHPDaemon module. type PHPDaemon struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` client *client charts *Charts } -// Cleanup makes cleanup. -func (PHPDaemon) Cleanup() {} +func (p *PHPDaemon) Configuration() any { + return p.Config +} // Init makes initialization. 
-func (p *PHPDaemon) Init() bool { - httpClient, err := web.NewHTTPClient(p.Client) - if err != nil { - p.Errorf("error on creating http client : %v", err) - return false +func (p *PHPDaemon) Init() error { + if err := p.validateConfig(); err != nil { + p.Error(err) + return err } - _, err = web.NewHTTPRequest(p.Request) + c, err := p.initClient() if err != nil { - p.Errorf("error on creating http request to %s : %v", p.URL, err) - return false + p.Error(err) + return err } - - p.client = newAPIClient(httpClient, p.Request) + p.client = c p.Debugf("using URL %s", p.URL) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) - return true + return nil } // Check makes check. -func (p *PHPDaemon) Check() bool { - mx := p.Collect() - +func (p *PHPDaemon) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } if len(mx) == 0 { - return false + return errors.New("no metrics collected") } + if _, ok := mx["uptime"]; ok { - // TODO: remove panic - panicIf(p.charts.Add(uptimeChart.Copy())) + _ = p.charts.Add(uptimeChart.Copy()) } - return true + return nil } // Charts creates Charts. -func (p PHPDaemon) Charts() *Charts { return p.charts } +func (p *PHPDaemon) Charts() *Charts { return p.charts } // Collect collects metrics. func (p *PHPDaemon) Collect() map[string]int64 { @@ -114,9 +111,9 @@ func (p *PHPDaemon) Collect() map[string]int64 { return mx } -func panicIf(err error) { - if err == nil { - return +// Cleanup makes cleanup. +func (p *PHPDaemon) Cleanup() { + if p.client != nil && p.client.httpClient != nil { + p.client.httpClient.CloseIdleConnections() } - panic(err) } diff --git a/modules/phpdaemon/phpdaemon_test.go b/modules/phpdaemon/phpdaemon_test.go index 0634e6ec4..aea27e54f 100644 --- a/modules/phpdaemon/phpdaemon_test.go +++ b/modules/phpdaemon/phpdaemon_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -23,18 +22,10 @@ func Test_testData(t *testing.T) { assert.NotEmpty(t, testFullStatusData) } -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) -} - func TestPHPDaemon_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.client) } @@ -48,15 +39,15 @@ func TestPHPDaemon_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestPHPDaemon_CheckNG(t *testing.T) { job := New() job.URL = testURL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestPHPDaemon_Charts(t *testing.T) { @@ -73,8 +64,8 @@ func TestPHPDaemon_Charts(t *testing.T) { defer ts.Close() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) assert.True(t, job.charts.Has(uptimeChart.ID)) } @@ -92,8 +83,8 @@ func TestPHPDaemon_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) expected := map[string]int64{ "alive": 350, @@ -121,8 +112,8 @@ func TestPHPDaemon_InvalidData(t *testing.T) { job := 
New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestPHPDaemon_404(t *testing.T) { @@ -135,6 +126,6 @@ func TestPHPDaemon_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/phpfpm/config_schema.json b/modules/phpfpm/config_schema.json index a6b0140f3..60cc0ae52 100644 --- a/modules/phpfpm/config_schema.json +++ b/modules/phpfpm/config_schema.json @@ -1,84 +1,91 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/phpfpm job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "socket": { - "type": "string" - }, - "address": { - "type": "string" - }, - "fcgi_path": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/phpfpm job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "socket": { + "type": "string" + }, + "address": { + "type": "string" + }, + "fcgi_path": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "oneOf": [ + { + "required": [ + "name", + "url" + ] + }, + { + "required": [ + "name", + "socket" + ] + }, + { + "required": [ + "name", + "address" + ] + } + ] }, - "oneOf": [ - { - "required": [ - "name", - "url" - ] - }, - { - "required": [ - "name", - "socket" - ] - }, - { - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true } - ] + } } diff --git a/modules/phpfpm/init.go b/modules/phpfpm/init.go index 0e764cbe0..5a6694634 100644 --- a/modules/phpfpm/init.go +++ b/modules/phpfpm/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (p Phpfpm) initClient() (client, error) { +func (p *Phpfpm) initClient() (client, error) { if p.Socket != "" { return p.initSocketClient() } @@ -20,32 +20,38 @@ func (p Phpfpm) initClient() (client, error) { if p.URL != "" { return p.initHTTPClient() } + return nil, errors.New("neither 'socket' nor 'url' set") } -func (p Phpfpm) initHTTPClient() (*httpClient, error) { +func (p *Phpfpm) initHTTPClient() (*httpClient, error) { c, err := web.NewHTTPClient(p.Client) if err != nil { 
return nil, fmt.Errorf("create HTTP client: %v", err) } + p.Debugf("using HTTP client, URL: %s", p.URL) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) + return newHTTPClient(c, p.Request) } -func (p Phpfpm) initSocketClient() (*socketClient, error) { +func (p *Phpfpm) initSocketClient() (*socketClient, error) { if _, err := os.Stat(p.Socket); err != nil { return nil, fmt.Errorf("the socket '%s' does not exist: %v", p.Socket, err) } + p.Debugf("using socket client: %s", p.Socket) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) p.Debugf("using fcgi path: %s", p.FcgiPath) - return newSocketClient(p.Socket, p.Timeout.Duration, p.FcgiPath), nil + + return newSocketClient(p.Socket, p.Timeout.Duration(), p.FcgiPath), nil } -func (p Phpfpm) initTcpClient() (*tcpClient, error) { +func (p *Phpfpm) initTcpClient() (*tcpClient, error) { p.Debugf("using tcp client: %s", p.Address) - p.Debugf("using timeout: %s", p.Timeout.Duration) + p.Debugf("using timeout: %s", p.Timeout) p.Debugf("using fcgi path: %s", p.FcgiPath) - return newTcpClient(p.Address, p.Timeout.Duration, p.FcgiPath), nil + + return newTcpClient(p.Address, p.Timeout.Duration(), p.FcgiPath), nil } diff --git a/modules/phpfpm/phpfpm.go b/modules/phpfpm/phpfpm.go index a61827929..79eee44f7 100644 --- a/modules/phpfpm/phpfpm.go +++ b/modules/phpfpm/phpfpm.go @@ -4,6 +4,7 @@ package phpfpm import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -29,7 +30,7 @@ func New() *Phpfpm { URL: "http://127.0.0.1/status?full&json", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, FcgiPath: "/status", @@ -37,36 +38,48 @@ func New() *Phpfpm { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - Socket string `yaml:"socket"` - Address string `yaml:"address"` - FcgiPath string `yaml:"fcgi_path"` - } - Phpfpm struct { - module.Base - Config `yaml:",inline"` +type Config struct { + web.HTTP `yaml:",inline"` + Socket string `yaml:"socket"` + Address string `yaml:"address"` + FcgiPath string `yaml:"fcgi_path"` +} - client client - } -) +type Phpfpm struct { + module.Base + Config `yaml:",inline"` + + client client +} + +func (p *Phpfpm) Configuration() any { + return p.Config +} -func (p *Phpfpm) Init() bool { +func (p *Phpfpm) Init() error { c, err := p.initClient() if err != nil { p.Errorf("init client: %v", err) - return false + return err } p.client = c - return true + + return nil } -func (p *Phpfpm) Check() bool { - return len(p.Collect()) > 0 +func (p *Phpfpm) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (Phpfpm) Charts() *Charts { +func (p *Phpfpm) Charts() *Charts { return charts.Copy() } @@ -82,4 +95,4 @@ func (p *Phpfpm) Collect() map[string]int64 { return mx } -func (Phpfpm) Cleanup() {} +func (p *Phpfpm) Cleanup() {} diff --git a/modules/phpfpm/phpfpm_test.go b/modules/phpfpm/phpfpm_test.go index 5b9ecd236..4f6cb212e 100644 --- a/modules/phpfpm/phpfpm_test.go +++ b/modules/phpfpm/phpfpm_test.go @@ -38,9 +38,7 @@ func TestNew(t *testing.T) { func TestPhpfpm_Init(t *testing.T) { job := New() - got := job.Init() - - require.True(t, got) + require.NoError(t, job.Init()) assert.NotNil(t, job.client) } @@ -54,30 +52,23 @@ func TestPhpfpm_Check(t *testing.T) { job := New() job.URL = ts.URL - job.Init() - require.True(t, job.Init()) - 
- got := job.Check() + require.NoError(t, job.Init()) - assert.True(t, got) + assert.NoError(t, job.Check()) } func TestPhpfpm_CheckReturnsFalseOnFailure(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - - got := job.Check() + require.NoError(t, job.Init()) - assert.False(t, got) + assert.Error(t, job.Check()) } func TestPhpfpm_Charts(t *testing.T) { job := New() - got := job.Charts() - - assert.NotNil(t, got) + assert.NotNil(t, job.Charts()) } func TestPhpfpm_CollectJSON(t *testing.T) { @@ -90,7 +81,7 @@ func TestPhpfpm_CollectJSON(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -115,7 +106,7 @@ func TestPhpfpm_CollectJSONFull(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -149,7 +140,7 @@ func TestPhpfpm_CollectNoIdleProcessesJSONFull(t *testing.T) { job := New() job.URL = ts.URL + "/?json" - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -174,7 +165,7 @@ func TestPhpfpm_CollectText(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -199,7 +190,7 @@ func TestPhpfpm_CollectTextFull(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) + require.NoError(t, job.Init()) got := job.Collect() @@ -233,11 +224,9 @@ func TestPhpfpm_CollectReturnsNothingWhenInvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) { @@ -250,11 +239,9 @@ func TestPhpfpm_CollectReturnsNothingWhenEmptyData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) { @@ -267,11 +254,9 @@ func TestPhpfpm_CollectReturnsNothingWhenBadStatusCode(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - - got := job.Collect() + require.NoError(t, job.Init()) - assert.Len(t, got, 0) + assert.Len(t, job.Collect(), 0) } func TestPhpfpm_Cleanup(t *testing.T) { diff --git a/modules/pihole/config_schema.json b/modules/pihole/config_schema.json index e4c13fa10..718623da9 100644 --- a/modules/pihole/config_schema.json +++ b/modules/pihole/config_schema.json @@ -1,62 +1,250 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pihole job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pi-hole collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + 
"$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The base URL of the Pi-hole instance.", + "type": "string", + "default": "http://127.0.0.1" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "setup_vars_path": { + "title": "Path to setupVars.conf", + "description": "This file is used to get the web password.", + "type": "string", + "default": "/etc/pihole/setupVars.conf" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate 
file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "setup_vars_path": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/pihole/pihole.go b/modules/pihole/pihole.go index 6aba5cad0..cad9e6c93 100644 --- a/modules/pihole/pihole.go +++ b/modules/pihole/pihole.go @@ -4,6 +4,7 @@ package pihole import ( _ "embed" + "errors" "net/http" "sync" "time" @@ -34,7 +35,8 @@ func New() *Pihole { URL: "http://127.0.0.1", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}}, + Timeout: web.Duration(time.Second * 5), + }, }, SetupVarsPath: "/etc/pihole/setupVars.conf", }, @@ -46,13 +48,15 @@ func New() *Pihole { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` SetupVarsPath string `yaml:"setup_vars_path"` } type Pihole struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts addQueriesTypesOnce *sync.Once @@ -62,16 +66,20 @@ type Pihole struct { checkVersion bool } -func (p *Pihole) Init() bool { +func (p *Pihole) Configuration() any { + return p.Config +} + +func (p *Pihole) Init() error { if err := p.validateConfig(); err != nil { p.Errorf("config validation: %v", err) - return false + return err } httpClient, err := p.initHTTPClient() if err != nil { p.Errorf("init http client: %v", err) - return false + return err } p.httpClient = httpClient @@ -82,11 +90,19 @@ func (p *Pihole) Init() bool { p.Debugf("web password: %s", p.Password) } - return true + return nil } -func (p *Pihole) Check() bool { - return len(p.Collect()) > 0 +func (p *Pihole) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Pihole) Charts() *module.Charts { diff --git a/modules/pihole/pihole_test.go b/modules/pihole/pihole_test.go index 08ad244a7..65168083e 100644 --- a/modules/pihole/pihole_test.go +++ b/modules/pihole/pihole_test.go @@ -52,9 +52,9 @@ func TestPihole_Init(t *testing.T) { p.Config = test.config if test.wantFail { - assert.False(t, p.Init()) + assert.Error(t, p.Init()) } else { - assert.True(t, p.Init()) + assert.NoError(t, p.Init()) } }) } @@ -85,9 +85,9 @@ func TestPihole_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, p.Check()) + assert.Error(t, p.Check()) } else { - assert.True(t, p.Check()) + assert.NoError(t, p.Check()) } }) } @@ -164,7 +164,7 @@ func 
caseSuccessWithWebPassword(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsOK p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } @@ -175,7 +175,7 @@ func caseFailNoWebPassword(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsWrong p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } @@ -186,7 +186,7 @@ func caseFailUnsupportedVersion(t *testing.T) (*Pihole, func()) { p.SetupVarsPath = pathSetupVarsOK p.URL = srv.URL - require.True(t, p.Init()) + require.NoError(t, p.Init()) return p, srv.Close } diff --git a/modules/pika/config_schema.json b/modules/pika/config_schema.json index d284faaa1..4f3529c8f 100644 --- a/modules/pika/config_schema.json +++ b/modules/pika/config_schema.json @@ -1,35 +1,42 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/pika job configuration schema.", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/pika job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/pika/init.go b/modules/pika/init.go index 2ad3ae8ec..5d9e34451 100644 --- a/modules/pika/init.go +++ b/modules/pika/init.go @@ -35,9 +35,9 @@ func (p Pika) initRedisClient() (*redis.Client, error) { opts.PoolSize = 1 opts.TLSConfig = tlsConfig - opts.DialTimeout = p.Timeout.Duration - opts.ReadTimeout = p.Timeout.Duration - opts.WriteTimeout = p.Timeout.Duration + opts.DialTimeout = p.Timeout.Duration() + opts.ReadTimeout = p.Timeout.Duration() + opts.WriteTimeout = p.Timeout.Duration() return redis.NewClient(opts), nil } diff --git a/modules/pika/pika.go b/modules/pika/pika.go index a14a44113..2121850cd 100644 --- a/modules/pika/pika.go +++ b/modules/pika/pika.go @@ -5,6 +5,7 @@ package pika import ( "context" _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Pika { return &Pika{ Config: Config{ Address: "redis://@localhost:9221", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, collectedCommands: make(map[string]bool), @@ -64,32 +65,44 @@ type ( } ) -func (p *Pika) Init() bool { +func (p *Pika) Configuration() any { + return p.Config +} + +func (p *Pika) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } pdb, err := p.initRedisClient() if err != nil { p.Errorf("init redis client: %v", err) - return false + return err } p.pdb = pdb charts, err := p.initCharts() if err != nil { p.Errorf("init charts: %v", err) - return false + return err } p.charts = charts - return true + return nil } -func (p *Pika) Check() bool { - return len(p.Collect()) > 0 +func (p 
*Pika) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Pika) Charts() *module.Charts { diff --git a/modules/pika/pika_test.go b/modules/pika/pika_test.go index a564a54ce..df883a64d 100644 --- a/modules/pika/pika_test.go +++ b/modules/pika/pika_test.go @@ -64,9 +64,9 @@ func TestPika_Init(t *testing.T) { pika.Config = test.config if test.wantFail { - assert.False(t, pika.Init()) + assert.Error(t, pika.Init()) } else { - assert.True(t, pika.Init()) + assert.NoError(t, pika.Init()) } }) } @@ -95,9 +95,9 @@ func TestPika_Check(t *testing.T) { pika := test.prepare(t) if test.wantFail { - assert.False(t, pika.Check()) + assert.Error(t, pika.Check()) } else { - assert.True(t, pika.Check()) + assert.NoError(t, pika.Check()) } }) } @@ -105,7 +105,7 @@ func TestPika_Check(t *testing.T) { func TestPika_Charts(t *testing.T) { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) assert.NotNil(t, pika.Charts()) } @@ -114,7 +114,7 @@ func TestPika_Cleanup(t *testing.T) { pika := New() assert.NotPanics(t, pika.Cleanup) - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) m := &mockRedisClient{} pika.pdb = m @@ -195,7 +195,7 @@ func TestPika_Collect(t *testing.T) { func preparePikaV340(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ result: v340InfoAll, } @@ -204,7 +204,7 @@ func preparePikaV340(t *testing.T) *Pika { func preparePikaErrorOnInfo(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ errOnInfo: true, } @@ -213,7 +213,7 @@ func preparePikaErrorOnInfo(t *testing.T) *Pika { func preparePikaWithRedisMetrics(t *testing.T) *Pika { pika := New() - require.True(t, pika.Init()) + require.NoError(t, pika.Init()) pika.pdb = &mockRedisClient{ result: redisInfoAll, } diff --git a/modules/ping/config_schema.json b/modules/ping/config_schema.json index fe3779bf4..944bf4ced 100644 --- a/modules/ping/config_schema.json +++ b/modules/ping/config_schema.json @@ -1,47 +1,54 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/ping job configuration schema.", - "properties": { - "name": { - "type": "string" - }, - "update_every": { - "type": "integer", - "minimum": 1 - }, - "hosts": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/ping job configuration schema.", + "properties": { + "update_every": { + "type": "integer", + "default": 1, + "minimum": 1 }, - "minItems": 1 - }, - "network": { - "type": "string", - "enum": [ - "ip", - "ip4", - "ip6" - ] - }, - "privileged": { - "type": "boolean" - }, - "sendPackets": { - "type": "integer", - "minimum": 1 - }, - "interval": { - "type": "integer", - "minimum": 1 + "hosts": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "network": { + "type": "string", + "default": "ip", + "enum": [ + "ip", + "ip4", + "ip6" + ] + }, + "privileged": { + "default": true, + "type": "boolean" + }, + "sendPackets": { + "type": "integer", + "default": 5, + "minimum": 1 + }, + "interval": { + "type": "string", + "default": "1s" + }, + "interface": { + "type": "string" + } }, - "interface": { - "type": "string" - } + "required": [ + "hosts" + ] }, - "required": [ - "name", - 
"hosts" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/ping/init.go b/modules/ping/init.go index e71aa6c75..62d78c8e6 100644 --- a/modules/ping/init.go +++ b/modules/ping/init.go @@ -31,7 +31,7 @@ func (p *Ping) initProber() (prober, error) { privileged: p.Privileged, packets: p.SendPackets, iface: p.Interface, - interval: p.Interval.Duration, + interval: p.Interval.Duration(), deadline: deadline, } diff --git a/modules/ping/ping.go b/modules/ping/ping.go index 7aa402985..4ef51ce78 100644 --- a/modules/ping/ping.go +++ b/modules/ping/ping.go @@ -4,6 +4,7 @@ package ping import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,7 +33,7 @@ func New() *Ping { Network: "ip", Privileged: true, SendPackets: 5, - Interval: web.Duration{Duration: time.Millisecond * 100}, + Interval: web.Duration(time.Millisecond * 100), }, charts: &module.Charts{}, @@ -42,13 +43,13 @@ func New() *Ping { } type Config struct { - UpdateEvery int `yaml:"update_every"` - Hosts []string `yaml:"hosts"` - Network string `yaml:"network"` - Privileged bool `yaml:"privileged"` - SendPackets int `yaml:"packets"` - Interval web.Duration `yaml:"interval"` - Interface string `yaml:"interface"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + Hosts []string `yaml:"hosts" json:"hosts"` + Network string `yaml:"network" json:"network"` + Privileged bool `yaml:"privileged" json:"privileged"` + SendPackets int `yaml:"packets" json:"packets"` + Interval web.Duration `yaml:"interval" json:"interval"` + Interface string `yaml:"interface" json:"interface"` } type ( @@ -68,25 +69,37 @@ type ( } ) -func (p *Ping) Init() bool { +func (p *Ping) Configuration() any { + return p.Config +} + +func (p *Ping) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } pr, err := p.initProber() if err != nil { p.Errorf("init prober: %v", err) - return false + return err } p.prober = pr - return true + return nil } -func (p *Ping) Check() bool { - return len(p.Collect()) > 0 +func (p *Ping) Check() error { + mx, err := p.collect() + if err != nil { + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + + } + return nil } func (p *Ping) Charts() *module.Charts { diff --git a/modules/ping/ping_test.go b/modules/ping/ping_test.go index 57958d557..c68b61f70 100644 --- a/modules/ping/ping_test.go +++ b/modules/ping/ping_test.go @@ -39,9 +39,9 @@ func TestPing_Init(t *testing.T) { ping.UpdateEvery = 1 if test.wantFail { - assert.False(t, ping.Init()) + assert.Error(t, ping.Init()) } else { - assert.True(t, ping.Init()) + assert.NoError(t, ping.Init()) } }) } @@ -75,9 +75,9 @@ func TestPing_Check(t *testing.T) { ping := test.prepare(t) if test.wantFail { - assert.False(t, ping.Check()) + assert.Error(t, ping.Check()) } else { - assert.True(t, ping.Check()) + assert.NoError(t, ping.Check()) } }) } @@ -145,7 +145,7 @@ func casePingSuccess(t *testing.T) *Ping { ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { return &mockProber{} } - require.True(t, ping.Init()) + require.NoError(t, ping.Init()) return ping } @@ -156,7 +156,7 @@ func casePingError(t *testing.T) *Ping { ping.newProber = func(_ pingProberConfig, _ *logger.Logger) prober { return &mockProber{errOnPing: true} } - require.True(t, ping.Init()) + require.NoError(t, ping.Init()) return ping } diff --git a/modules/portcheck/collect.go b/modules/portcheck/collect.go index 723c105c3..dab45ec41 100644 --- 
a/modules/portcheck/collect.go +++ b/modules/portcheck/collect.go @@ -41,7 +41,7 @@ func (pc *PortCheck) collect() (map[string]int64, error) { func (pc *PortCheck) checkPort(p *port) { start := time.Now() - conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration) + conn, err := pc.dial("tcp", fmt.Sprintf("%s:%d", pc.Host, p.number), pc.Timeout.Duration()) dur := time.Since(start) defer func() { diff --git a/modules/portcheck/config_schema.json b/modules/portcheck/config_schema.json index 8b9515702..e4782486e 100644 --- a/modules/portcheck/config_schema.json +++ b/modules/portcheck/config_schema.json @@ -1,37 +1,44 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/portcheck job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string", - "minLength": 1 - }, - "host": { - "type": "string", - "minLength": 1 - }, - "ports": { - "type": "array", - "items": { - "type": "integer", - "minimum": 1 + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/portcheck job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string", + "minLength": 1 + }, + "host": { + "type": "string", + "minLength": 1 }, - "minItems": 1 + "ports": { + "type": "array", + "items": { + "type": "integer", + "minimum": 1 + }, + "minItems": 1 + }, + "timeout": { + "type": [ + "string", + "integer" + ], + "minLength": 1, + "minimum": 1, + "description": "The timeout duration, in seconds. Must be at least 1." + } }, - "timeout": { - "type": [ - "string", - "integer" - ], - "minLength": 1, - "minimum": 1, - "description": "The timeout duration, in seconds. Must be at least 1." - } + "required": [ + "name", + "host", + "ports" + ] }, - "required": [ - "name", - "host", - "ports" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/portcheck/init.go b/modules/portcheck/init.go index d5c2ebb55..23825620b 100644 --- a/modules/portcheck/init.go +++ b/modules/portcheck/init.go @@ -4,10 +4,21 @@ package portcheck import ( "errors" + "net" + "time" "github.com/netdata/go.d.plugin/agent/module" ) +type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) + +type port struct { + number int + state checkState + inState int + latency int +} + func (pc *PortCheck) validateConfig() error { if pc.Host == "" { return errors.New("'host' parameter not set") @@ -29,3 +40,10 @@ func (pc *PortCheck) initCharts() (*module.Charts, error) { return &charts, nil } + +func (pc *PortCheck) initPorts() (ports []*port) { + for _, p := range pc.Ports { + ports = append(ports, &port{number: p}) + } + return ports +} diff --git a/modules/portcheck/portcheck.go b/modules/portcheck/portcheck.go index c7e2c0b9d..1e81c3bad 100644 --- a/modules/portcheck/portcheck.go +++ b/modules/portcheck/portcheck.go @@ -27,7 +27,7 @@ func init() { func New() *PortCheck { return &PortCheck{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, dial: net.DialTimeout, } @@ -39,15 +39,6 @@ type Config struct { Timeout web.Duration `yaml:"timeout"` } -type dialFunc func(network, address string, timeout time.Duration) (net.Conn, error) - -type port struct { - number int - state checkState - inState int - latency int -} - type PortCheck struct { module.Base Config `yaml:",inline"` @@ -58,32 +49,34 @@ type PortCheck struct { ports []*port } -func (pc *PortCheck) Init() bool { +func (pc *PortCheck) 
Configuration() any { + return pc.Config +} + +func (pc *PortCheck) Init() error { if err := pc.validateConfig(); err != nil { pc.Errorf("config validation: %v", err) - return false + return err } charts, err := pc.initCharts() if err != nil { pc.Errorf("init charts: %v", err) - return false + return err } pc.charts = charts - for _, p := range pc.Ports { - pc.ports = append(pc.ports, &port{number: p}) - } + pc.ports = pc.initPorts() pc.Debugf("using host: %s", pc.Host) pc.Debugf("using ports: %v", pc.Ports) pc.Debugf("using TCP connection timeout: %s", pc.Timeout) - return true + return nil } -func (pc *PortCheck) Check() bool { - return true +func (pc *PortCheck) Check() error { + return nil } func (pc *PortCheck) Charts() *module.Charts { diff --git a/modules/portcheck/portcheck_test.go b/modules/portcheck/portcheck_test.go index 2e242cbbb..28af96800 100644 --- a/modules/portcheck/portcheck_test.go +++ b/modules/portcheck/portcheck_test.go @@ -25,21 +25,21 @@ func TestPortCheck_Init(t *testing.T) { job.Host = "127.0.0.1" job.Ports = []int{39001, 39002} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.Len(t, job.ports, 2) } func TestPortCheck_InitNG(t *testing.T) { job := New() - assert.False(t, job.Init()) + assert.Error(t, job.Init()) job.Host = "127.0.0.1" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) job.Ports = []int{39001, 39002} - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) } func TestPortCheck_Check(t *testing.T) { - assert.True(t, New().Check()) + assert.NoError(t, New().Check()) } func TestPortCheck_Cleanup(t *testing.T) { @@ -50,7 +50,7 @@ func TestPortCheck_Charts(t *testing.T) { job := New() job.Ports = []int{1, 2} job.Host = "localhost" - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.Len(t, *job.Charts(), len(chartsTmpl)*len(job.Ports)) } @@ -61,8 +61,8 @@ func TestPortCheck_Collect(t *testing.T) { job.Ports = []int{39001, 39002} job.UpdateEvery = 5 job.dial = testDial(nil) - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) copyLatency := func(dst, src map[string]int64) { for k := range dst { diff --git a/modules/postgres/collect.go b/modules/postgres/collect.go index f66e956a3..b43e2806e 100644 --- a/modules/postgres/collect.go +++ b/modules/postgres/collect.go @@ -132,7 +132,7 @@ func (p *Postgres) openPrimaryConnection() (*sql.DB, error) { db.SetMaxIdleConns(1) db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { @@ -162,7 +162,7 @@ func (p *Postgres) openSecondaryConnection(dbname string) (*sql.DB, string, erro db.SetMaxIdleConns(1) db.SetConnMaxLifetime(10 * time.Minute) - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() if err := db.PingContext(ctx); err != nil { diff --git a/modules/postgres/config_schema.json b/modules/postgres/config_schema.json index 98a8616b7..83fa7484b 100644 --- a/modules/postgres/config_schema.json +++ b/modules/postgres/config_schema.json @@ -1,44 +1,51 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/postgres job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "dsn": { - "type": "string" - }, - 
"timeout": { - "type": [ - "string", - "integer" - ] - }, - "collect_databases_matching": { - "type": "string" - }, - "transaction_time_histogram": { - "type": "array", - "items": { - "type": "number" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/postgres job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "collect_databases_matching": { + "type": "string" + }, + "transaction_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "query_time_histogram": { + "type": "array", + "items": { + "type": "number" + } + }, + "max_db_tables": { + "type": "integer" + }, + "max_db_indexes": { + "type": "integer" } }, - "query_time_histogram": { - "type": "array", - "items": { - "type": "number" - } - }, - "max_db_tables": { - "type": "integer" - }, - "max_db_indexes": { - "type": "integer" - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/postgres/do_query.go b/modules/postgres/do_query.go index ea134ec5f..3b90be0d7 100644 --- a/modules/postgres/do_query.go +++ b/modules/postgres/do_query.go @@ -8,14 +8,14 @@ import ( ) func (p *Postgres) doQueryRow(query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return p.db.QueryRowContext(ctx, query).Scan(v) } func (p *Postgres) doDBQueryRow(db *sql.DB, query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return db.QueryRowContext(ctx, query).Scan(v) @@ -26,7 +26,7 @@ func (p *Postgres) doQuery(query string, assign func(column, value string, rowEn } func (p *Postgres) doDBQuery(db *sql.DB, query string, assign func(column, value string, rowEnd bool)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := db.QueryContext(ctx, query) diff --git a/modules/postgres/postgres.go b/modules/postgres/postgres.go index a1dabf9d3..b3a0b7579 100644 --- a/modules/postgres/postgres.go +++ b/modules/postgres/postgres.go @@ -5,6 +5,7 @@ package postgres import ( "database/sql" _ "embed" + "errors" "sync" "time" @@ -30,7 +31,7 @@ func init() { func New() *Postgres { return &Postgres{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), DSN: "postgres://postgres:postgres@127.0.0.1:5432/postgres", XactTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, QueryTimeHistogram: []float64{.1, .5, 1, 2.5, 5, 10}, @@ -99,28 +100,40 @@ type ( } ) -func (p *Postgres) Init() bool { +func (p *Postgres) Configuration() any { + return p.Config +} + +func (p *Postgres) Init() error { err := p.validateConfig() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } sr, err := p.initDBSelector() if err != nil { p.Errorf("config validation: %v", err) - return false + return err } p.dbSr = sr p.mx.xactTimeHist = metrics.NewHistogramWithRangeBuckets(p.XactTimeHistogram) p.mx.queryTimeHist = metrics.NewHistogramWithRangeBuckets(p.QueryTimeHistogram) - return 
true + return nil } -func (p *Postgres) Check() bool { - return len(p.Collect()) > 0 +func (p *Postgres) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Postgres) Charts() *module.Charts { diff --git a/modules/postgres/postgres_test.go b/modules/postgres/postgres_test.go index a41c11235..616098526 100644 --- a/modules/postgres/postgres_test.go +++ b/modules/postgres/postgres_test.go @@ -128,9 +128,9 @@ func TestPostgres_Init(t *testing.T) { pg.Config = test.config if test.wantFail { - assert.False(t, pg.Init()) + assert.Error(t, pg.Init()) } else { - assert.True(t, pg.Init()) + assert.NoError(t, pg.Init()) } }) } @@ -233,14 +233,14 @@ func TestPostgres_Check(t *testing.T) { pg.db = db defer func() { _ = db.Close() }() - require.True(t, pg.Init()) + require.NoError(t, pg.Init()) test.prepareMock(t, pg, mock) if test.wantFail { - assert.False(t, pg.Check()) + assert.Error(t, pg.Check()) } else { - assert.True(t, pg.Check()) + assert.NoError(t, pg.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -669,7 +669,7 @@ func TestPostgres_Collect(t *testing.T) { pg.db = db defer func() { _ = db.Close() }() - require.True(t, pg.Init()) + require.NoError(t, pg.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/powerdns/authoritativens.go b/modules/powerdns/authoritativens.go index 07b7fdbcf..506366396 100644 --- a/modules/powerdns/authoritativens.go +++ b/modules/powerdns/authoritativens.go @@ -4,6 +4,7 @@ package powerdns import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *AuthoritativeNS { URL: "http://127.0.0.1:8081", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -37,43 +38,57 @@ func New() *AuthoritativeNS { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type AuthoritativeNS struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` httpClient *http.Client charts *module.Charts } -func (ns *AuthoritativeNS) Init() bool { +func (ns *AuthoritativeNS) Configuration() any { + return ns.Config +} + +func (ns *AuthoritativeNS) Init() error { err := ns.validateConfig() if err != nil { ns.Errorf("config validation: %v", err) - return false + return err } client, err := ns.initHTTPClient() if err != nil { ns.Errorf("init HTTP client: %v", err) - return false + return err } ns.httpClient = client cs, err := ns.initCharts() if err != nil { ns.Errorf("init charts: %v", err) - return false + return err } ns.charts = cs - return true + return nil } -func (ns *AuthoritativeNS) Check() bool { - return len(ns.Collect()) > 0 +func (ns *AuthoritativeNS) Check() error { + mx, err := ns.collect() + if err != nil { + ns.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (ns *AuthoritativeNS) Charts() *module.Charts { diff --git a/modules/powerdns/authoritativens_test.go b/modules/powerdns/authoritativens_test.go index 71e5c6dc4..637f251c4 100644 --- a/modules/powerdns/authoritativens_test.go +++ b/modules/powerdns/authoritativens_test.go @@ -70,9 +70,9 @@ func TestRecursor_Init(t *testing.T) { ns.Config = test.config if test.wantFail { - assert.False(t, ns.Init()) + assert.Error(t, ns.Init()) } else { - 
assert.True(t, ns.Init()) + assert.NoError(t, ns.Init()) } }) } @@ -108,12 +108,12 @@ func TestRecursor_Check(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) if test.wantFail { - assert.False(t, recursor.Check()) + assert.Error(t, recursor.Check()) } else { - assert.True(t, recursor.Check()) + assert.NoError(t, recursor.Check()) } }) } @@ -121,7 +121,7 @@ func TestRecursor_Check(t *testing.T) { func TestRecursor_Charts(t *testing.T) { recursor := New() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) assert.NotNil(t, recursor.Charts()) } @@ -236,7 +236,7 @@ func TestRecursor_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { ns, cleanup := test.prepare() defer cleanup() - require.True(t, ns.Init()) + require.NoError(t, ns.Init()) collected := ns.Collect() diff --git a/modules/powerdns/config_schema.json b/modules/powerdns/config_schema.json index 93f8e72a2..53a91fc9f 100644 --- a/modules/powerdns/config_schema.json +++ b/modules/powerdns/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/powerdns job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "PowerDNS collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the PowerDNS built-in webserver.", + "type": "string", + "default": "http://127.0.0.1:8081" + }, + 
"update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/powerdns/init.go b/modules/powerdns/init.go index a577db773..aefdc5cb9 100644 --- a/modules/powerdns/init.go +++ b/modules/powerdns/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (ns AuthoritativeNS) validateConfig() error { +func (ns *AuthoritativeNS) validateConfig() error { if ns.URL == "" { return errors.New("URL not set") } @@ -20,10 +20,10 @@ func (ns AuthoritativeNS) validateConfig() error { return nil } -func (ns AuthoritativeNS) initHTTPClient() (*http.Client, error) { +func (ns *AuthoritativeNS) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(ns.Client) } -func (ns AuthoritativeNS) initCharts() (*module.Charts, error) { +func (ns *AuthoritativeNS) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/powerdns_recursor/config_schema.json b/modules/powerdns_recursor/config_schema.json index fcd19e150..dc22fd58e 100644 --- a/modules/powerdns_recursor/config_schema.json +++ b/modules/powerdns_recursor/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/powerdns_recursor job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "PowerDNS Recursor collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + 
"properties": { + "url": { + "title": "URL", + "description": "The URL of the PowerDNS built-in webserver.", + "type": "string", + "default": "http://127.0.0.1:8081" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/powerdns_recursor/init.go b/modules/powerdns_recursor/init.go index aa74eec2f..3d9e226bc 100644 --- a/modules/powerdns_recursor/init.go +++ b/modules/powerdns_recursor/init.go @@ -10,7 +10,7 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (r Recursor) validateConfig() error { +func (r *Recursor) validateConfig() error { if r.URL == "" { return errors.New("URL not set") } @@ -20,10 +20,10 @@ func (r Recursor) validateConfig() error { return nil } -func (r Recursor) initHTTPClient() (*http.Client, error) { +func (r *Recursor) initHTTPClient() (*http.Client, error) { return web.NewHTTPClient(r.Client) } -func (r Recursor) initCharts() (*module.Charts, error) { +func (r *Recursor) initCharts() (*module.Charts, error) { return charts.Copy(), nil } diff --git a/modules/powerdns_recursor/recursor.go b/modules/powerdns_recursor/recursor.go index cd052ba6d..f9e20bdc8 100644 --- a/modules/powerdns_recursor/recursor.go +++ b/modules/powerdns_recursor/recursor.go @@ -4,6 +4,7 @@ package powerdns_recursor import ( _ "embed" + "errors" "net/http" "time" @@ -29,7 +30,7 @@ func New() *Recursor { URL: "http://127.0.0.1:8081", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -37,43 +38,57 @@ func New() *Recursor { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type Recursor struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` httpClient *http.Client charts *module.Charts } -func (r *Recursor) Init() bool { +func (r *Recursor) Configuration() any { + return r.Config +} + +func (r *Recursor) Init() error { err := r.validateConfig() if err != nil { r.Errorf("config validation: %v", err) - return false + return err } client, err := r.initHTTPClient() if err != nil { r.Errorf("init HTTP client: %v", err) - return false + return err } r.httpClient = client cs, err := r.initCharts() if err != nil { r.Errorf("init charts: %v", err) - return false + return err } r.charts = cs - return true + return nil } -func (r *Recursor) Check() bool { - return len(r.Collect()) > 0 +func (r *Recursor) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *Recursor) Charts() *module.Charts { diff --git a/modules/powerdns_recursor/recursor_test.go b/modules/powerdns_recursor/recursor_test.go index 4ef3c2d08..64b6e36e3 100644 --- a/modules/powerdns_recursor/recursor_test.go +++ b/modules/powerdns_recursor/recursor_test.go @@ -70,9 +70,9 @@ func TestRecursor_Init(t *testing.T) { recursor.Config = test.config if test.wantFail { - assert.False(t, recursor.Init()) + assert.Error(t, recursor.Init()) } else { - assert.True(t, recursor.Init()) + assert.NoError(t, recursor.Init()) } }) } @@ -108,12 +108,12 @@ func TestRecursor_Check(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) if test.wantFail { - assert.False(t, recursor.Check()) + assert.Error(t, recursor.Check()) } else { - assert.True(t, recursor.Check()) + 
assert.NoError(t, recursor.Check()) } }) } @@ -121,7 +121,7 @@ func TestRecursor_Check(t *testing.T) { func TestRecursor_Charts(t *testing.T) { recursor := New() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) assert.NotNil(t, recursor.Charts()) } @@ -271,7 +271,7 @@ func TestRecursor_Collect(t *testing.T) { t.Run(name, func(t *testing.T) { recursor, cleanup := test.prepare() defer cleanup() - require.True(t, recursor.Init()) + require.NoError(t, recursor.Init()) collected := recursor.Collect() diff --git a/modules/prometheus/config_schema.json b/modules/prometheus/config_schema.json index 60261d542..f74c04f49 100644 --- a/modules/prometheus/config_schema.json +++ b/modules/prometheus/config_schema.json @@ -1,113 +1,120 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/prometheus job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "selector": { - "type": "object", - "properties": { - "allow": { - "type": "array", - "items": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/prometheus job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "url": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "selector": { + "type": "object", + "properties": { + "allow": { + "type": "array", + "items": { + "type": "string" + } + }, + "deny": { + "type": "array", + "items": { + "type": "string" + } } }, - "deny": { - "type": "array", - "items": { - "type": "string" - } - } + "required": [ + "allow", + "deny" + ] }, - "required": [ - "allow", - "deny" - ] - }, - "fallback_type": { - "type": "object", - "properties": { - "counter": { - "type": "array", - "items": { - "type": "string" + "fallback_type": { + "type": "object", + "properties": { + "counter": { + "type": "array", + "items": { + "type": "string" + } + }, + "gauge": { + "type": "array", + "items": { + "type": "string" + } } }, - "gauge": { - "type": "array", - "items": { - "type": "string" - } + "required": [ + "counter", + "gauge" + ] + }, + "bearer_token": { + "type": "string" + }, + "expected_prefix": { + "type": "string" + }, + "max_time_series": { + "type": "integer" + }, + "max_time_series_per_metric": { + "type": "integer" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" } }, - "required": [ - "counter", - "gauge" - ] - }, - "bearer_token": { - "type": "string" - }, - "expected_prefix": { - "type": "string" - }, - "max_time_series": { - "type": "integer" - }, - "max_time_series_per_metric": { - "type": "integer" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - 
}, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/prometheus/prometheus.go b/modules/prometheus/prometheus.go index 32a91e5c2..e403329af 100644 --- a/modules/prometheus/prometheus.go +++ b/modules/prometheus/prometheus.go @@ -4,6 +4,7 @@ package prometheus import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -31,7 +32,7 @@ func New() *Prometheus { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 10}, + Timeout: web.Duration(time.Second * 10), }, }, MaxTS: 2000, @@ -44,16 +45,14 @@ func New() *Prometheus { type Config struct { web.HTTP `yaml:",inline"` - Name string `yaml:"name"` - Application string `yaml:"app"` - BearerTokenFile string `yaml:"bearer_token_file"` - - Selector selector.Expr `yaml:"selector"` - - ExpectedPrefix string `yaml:"expected_prefix"` - MaxTS int `yaml:"max_time_series"` - MaxTSPerMetric int `yaml:"max_time_series_per_metric"` - FallbackType struct { + Name string `yaml:"name"` + Application string `yaml:"app"` + BearerTokenFile string `yaml:"bearer_token_file"` + Selector selector.Expr `yaml:"selector"` + ExpectedPrefix string `yaml:"expected_prefix"` + MaxTS int `yaml:"max_time_series"` + MaxTSPerMetric int `yaml:"max_time_series_per_metric"` + FallbackType struct { Counter []string `yaml:"counter"` Gauge []string `yaml:"gauge"` } `yaml:"fallback_type"` @@ -74,38 +73,50 @@ type Prometheus struct { } } -func (p *Prometheus) Init() bool { +func (p *Prometheus) Configuration() any { + return p.Config +} + +func (p *Prometheus) Init() error { if err := p.validateConfig(); err != nil { p.Errorf("validating config: %v", err) - return false + return err } prom, err := p.initPrometheusClient() if err != nil { p.Errorf("init prometheus client: %v", err) - return false + return err } p.prom = prom m, err := p.initFallbackTypeMatcher(p.FallbackType.Counter) if err != nil { p.Errorf("init counter fallback type matcher: %v", err) - return false + return err } p.fallbackType.counter = m m, err = p.initFallbackTypeMatcher(p.FallbackType.Gauge) if err != nil { p.Errorf("init counter fallback type matcher: %v", err) - return false + return err } p.fallbackType.gauge = m - return true + return nil } -func (p *Prometheus) Check() bool { - return len(p.Collect()) > 0 +func (p *Prometheus) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *Prometheus) Charts() *module.Charts { @@ -124,4 +135,8 @@ func (p *Prometheus) Collect() map[string]int64 { return mx } -func (p *Prometheus) Cleanup() {} +func (p *Prometheus) Cleanup() { + if p.prom != nil && p.prom.HTTPClient() != nil { + p.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/prometheus/prometheus_test.go b/modules/prometheus/prometheus_test.go index 95bf55bd2..837041026 100644 --- a/modules/prometheus/prometheus_test.go +++ b/modules/prometheus/prometheus_test.go @@ -44,9 +44,9 @@ func TestPrometheus_Init(t *testing.T) { prom.Config = test.config if test.wantFail { - assert.False(t, prom.Init()) + assert.Error(t, prom.Init()) } else { - assert.True(t, prom.Init()) + assert.NoError(t, prom.Init()) } }) } @@ -57,7 +57,7 @@ 
func TestPrometheus_Cleanup(t *testing.T) { prom := New() prom.URL = "http://127.0.0.1" - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) assert.NotPanics(t, prom.Cleanup) } @@ -169,12 +169,12 @@ test_counter_no_meta_metric_1_total{label1="value2"} 11 prom, cleanup := test.prepare() defer cleanup() - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) if test.wantFail { - assert.False(t, prom.Check()) + assert.Error(t, prom.Check()) } else { - assert.True(t, prom.Check()) + assert.NoError(t, prom.Check()) } }) } @@ -558,7 +558,7 @@ test_gauge_no_meta_metric_1{label1="value2"} 12 defer srv.Close() prom.URL = srv.URL - require.True(t, prom.Init()) + require.NoError(t, prom.Init()) for num, step := range test.steps { t.Run(fmt.Sprintf("step num %d ('%s')", num+1, step.desc), func(t *testing.T) { diff --git a/modules/proxysql/collect.go b/modules/proxysql/collect.go index cc35fc02d..dfc559a97 100644 --- a/modules/proxysql/collect.go +++ b/modules/proxysql/collect.go @@ -225,14 +225,14 @@ func (p *ProxySQL) openConnection() error { } func (p *ProxySQL) doQueryRow(query string, v any) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() return p.db.QueryRowContext(ctx, query).Scan(v) } func (p *ProxySQL) doQuery(query string, assign func(column, value string, rowEnd bool)) error { - ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), p.Timeout.Duration()) defer cancel() rows, err := p.db.QueryContext(ctx, query) diff --git a/modules/proxysql/config_schema.json b/modules/proxysql/config_schema.json index 5fab79bc7..10fb336df 100644 --- a/modules/proxysql/config_schema.json +++ b/modules/proxysql/config_schema.json @@ -1,26 +1,33 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/proxysql job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/proxysql job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "dsn": { + "type": "string" + }, + "my.cnf": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "dsn": { - "type": "string" - }, - "my.cnf": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "dsn" + ] }, - "required": [ - "name", - "dsn" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/proxysql/proxysql.go b/modules/proxysql/proxysql.go index d52c36efd..c7eaffaf6 100644 --- a/modules/proxysql/proxysql.go +++ b/modules/proxysql/proxysql.go @@ -5,6 +5,7 @@ package proxysql import ( "database/sql" _ "embed" + "errors" _ "github.com/go-sql-driver/mysql" "sync" "time" @@ -27,7 +28,7 @@ func New() *ProxySQL { return &ProxySQL{ Config: Config{ DSN: "stats:stats@tcp(127.0.0.1:6032)/", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: baseCharts.Copy(), @@ -46,32 +47,43 @@ type Config struct { Timeout web.Duration `yaml:"timeout"` } -type ( - ProxySQL struct { - module.Base - Config `yaml:",inline"` +type ProxySQL struct { + module.Base + Config `yaml:",inline"` - db *sql.DB + db *sql.DB - charts *module.Charts + charts *module.Charts - once *sync.Once - cache *cache - } -) 
+ once *sync.Once + cache *cache +} -func (p *ProxySQL) Init() bool { +func (p *ProxySQL) Configuration() any { + return p.Config +} + +func (p *ProxySQL) Init() error { if p.DSN == "" { - p.Error("'dsn' not set") - return false + p.Error("dsn not set") + return errors.New("dsn not set") } p.Debugf("using DSN [%s]", p.DSN) - return true + + return nil } -func (p *ProxySQL) Check() bool { - return len(p.Collect()) > 0 +func (p *ProxySQL) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (p *ProxySQL) Charts() *module.Charts { diff --git a/modules/proxysql/proxysql_test.go b/modules/proxysql/proxysql_test.go index ec31c4d85..d5d30aa07 100644 --- a/modules/proxysql/proxysql_test.go +++ b/modules/proxysql/proxysql_test.go @@ -62,9 +62,9 @@ func TestProxySQL_Init(t *testing.T) { proxySQL.Config = test.config if test.wantFail { - assert.False(t, proxySQL.Init()) + assert.Error(t, proxySQL.Init()) } else { - assert.True(t, proxySQL.Init()) + assert.NoError(t, proxySQL.Init()) } }) } @@ -165,14 +165,14 @@ func TestProxySQL_Check(t *testing.T) { proxySQL.db = db defer func() { _ = db.Close() }() - require.True(t, proxySQL.Init()) + require.NoError(t, proxySQL.Init()) test.prepareMock(t, mock) if test.wantFail { - assert.False(t, proxySQL.Check()) + assert.Error(t, proxySQL.Check()) } else { - assert.True(t, proxySQL.Check()) + assert.NoError(t, proxySQL.Check()) } assert.NoError(t, mock.ExpectationsWereMet()) }) @@ -1152,7 +1152,7 @@ func TestProxySQL_Collect(t *testing.T) { my.db = db defer func() { _ = db.Close() }() - require.True(t, my.Init()) + require.NoError(t, my.Init()) for i, step := range test { t.Run(fmt.Sprintf("step[%d]", i), func(t *testing.T) { diff --git a/modules/pulsar/cache.go b/modules/pulsar/cache.go new file mode 100644 index 000000000..7f113bf86 --- /dev/null +++ b/modules/pulsar/cache.go @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +func newCache() *cache { + return &cache{ + namespaces: make(map[namespace]bool), + topics: make(map[topic]bool), + } +} + +type ( + namespace struct{ name string } + topic struct{ namespace, name string } + cache struct { + namespaces map[namespace]bool + topics map[topic]bool + } +) diff --git a/modules/pulsar/config_schema.json b/modules/pulsar/config_schema.json index 083eb0b98..361781a07 100644 --- a/modules/pulsar/config_schema.json +++ b/modules/pulsar/config_schema.json @@ -1,76 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/pulsar job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pulsar collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + 
"configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "topic_filter": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Pulsar metrics endpoint.", + "type": "string", + "default": "http://127.0.0.1:8080/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 60 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" } }, - "excludes": { - "type": "array", - "items": { + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key 
file for TLS authentication.", "type": "string" } } } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/pulsar/init.go b/modules/pulsar/init.go new file mode 100644 index 000000000..d1302bd01 --- /dev/null +++ b/modules/pulsar/init.go @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package pulsar + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/matcher" + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (p *Pulsar) validateConfig() error { + if p.URL == "" { + return errors.New("url not set") + } + return nil +} + +func (p *Pulsar) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(p.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, p.Request), nil +} + +func (p *Pulsar) initTopicFilerMatcher() (matcher.Matcher, error) { + if p.TopicFiler.Empty() { + return matcher.TRUE(), nil + } + return p.TopicFiler.Parse() +} diff --git a/modules/pulsar/pulsar.go b/modules/pulsar/pulsar.go index 8b0ce9101..6d292cd7f 100644 --- a/modules/pulsar/pulsar.go +++ b/modules/pulsar/pulsar.go @@ -29,22 +29,21 @@ func init() { } func New() *Pulsar { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8080/metrics", + return &Pulsar{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8080/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 5), + }, }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + TopicFiler: matcher.SimpleExpr{ + Includes: nil, + Excludes: []string{"*"}, }, }, - TopicFiler: matcher.SimpleExpr{ - Includes: nil, - Excludes: []string{"*"}, - }, - } - return &Pulsar{ - Config: config, once: &sync.Once{}, charts: summaryCharts.Copy(), nsCharts: namespaceCharts.Copy(), @@ -54,90 +53,64 @@ func New() *Pulsar { } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - TopicFiler matcher.SimpleExpr `yaml:"topic_filter"` - } - - Pulsar struct { - module.Base - Config `yaml:",inline"` - - prom prometheus.Prometheus - topicFilter matcher.Matcher - cache *cache - curCache *cache - once *sync.Once - charts *Charts - nsCharts *Charts - topicChartsMapping map[string]string - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` - namespace struct{ name string } - topic struct{ namespace, name string } - cache struct { - namespaces map[namespace]bool - topics map[topic]bool - } -) + web.HTTP `yaml:",inline" json:",inline"` + TopicFiler matcher.SimpleExpr `yaml:"topic_filter"` +} -func newCache() *cache { - return &cache{ - namespaces: 
make(map[namespace]bool), - topics: make(map[topic]bool), - } +type Pulsar struct { + module.Base + Config `yaml:",inline" json:",inline"` + + prom prometheus.Prometheus + topicFilter matcher.Matcher + cache *cache + curCache *cache + once *sync.Once + charts *Charts + nsCharts *Charts + topicChartsMapping map[string]string } -func (p Pulsar) validateConfig() error { - if p.URL == "" { - return errors.New("URL is not set") - } - return nil +func (p *Pulsar) Configuration() any { + return p.Config } -func (p *Pulsar) initClient() error { - client, err := web.NewHTTPClient(p.Client) - if err != nil { +func (p *Pulsar) Init() error { + if err := p.validateConfig(); err != nil { + p.Errorf("config validation: %v", err) return err } - p.prom = prometheus.New(client, p.Request) - return nil -} - -func (p *Pulsar) initTopicFiler() error { - if p.TopicFiler.Empty() { - p.topicFilter = matcher.TRUE() - return nil + prom, err := p.initPrometheusClient() + if err != nil { + p.Error(err) + return err } + p.prom = prom - m, err := p.TopicFiler.Parse() + m, err := p.initTopicFilerMatcher() if err != nil { + p.Error(err) return err } p.topicFilter = m + return nil } -func (p *Pulsar) Init() bool { - if err := p.validateConfig(); err != nil { - p.Errorf("config validation: %v", err) - return false - } - if err := p.initClient(); err != nil { - p.Errorf("client initializing: %v", err) - return false +func (p *Pulsar) Check() error { + mx, err := p.collect() + if err != nil { + p.Error(err) + return err } - if err := p.initTopicFiler(); err != nil { - p.Errorf("topic filer initialization: %v", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (p *Pulsar) Check() bool { - return len(p.Collect()) > 0 + return nil } func (p *Pulsar) Charts() *Charts { @@ -156,4 +129,8 @@ func (p *Pulsar) Collect() map[string]int64 { return mx } -func (Pulsar) Cleanup() {} +func (p *Pulsar) Cleanup() { + if p.prom != nil && p.prom.HTTPClient() != nil { + p.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/pulsar/pulsar_test.go b/modules/pulsar/pulsar_test.go index 3bf9468b6..418e4102d 100644 --- a/modules/pulsar/pulsar_test.go +++ b/modules/pulsar/pulsar_test.go @@ -71,9 +71,9 @@ func TestPulsar_Init(t *testing.T) { pulsar.Config = test.config if test.wantFail { - assert.False(t, pulsar.Init()) + assert.Error(t, pulsar.Init()) } else { - assert.True(t, pulsar.Init()) + assert.NoError(t, pulsar.Init()) } }) } @@ -102,9 +102,9 @@ func TestPulsar_Check(t *testing.T) { defer srv.Close() if test.wantFail { - assert.False(t, pulsar.Check()) + assert.Error(t, pulsar.Check()) } else { - assert.True(t, pulsar.Check()) + assert.NoError(t, pulsar.Check()) } }) } @@ -225,7 +225,7 @@ func prepareClientServerStdV250Namespaces(t *testing.T) (*Pulsar, *httptest.Serv pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -239,7 +239,7 @@ func prepareClientServerStdV250Topics(t *testing.T) (*Pulsar, *httptest.Server) pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -267,7 +267,7 @@ func prepareClientServersDynamicStdV250Topics(t *testing.T) (*Pulsar, *httptest. 
pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -281,7 +281,7 @@ func prepareClientServerNonPulsar(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -295,7 +295,7 @@ func prepareClientServerInvalidData(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -309,7 +309,7 @@ func prepareClientServer404(t *testing.T) (*Pulsar, *httptest.Server) { pulsar := New() pulsar.URL = srv.URL - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } @@ -320,7 +320,7 @@ func prepareClientServerConnectionRefused(t *testing.T) (*Pulsar, *httptest.Serv pulsar := New() pulsar.URL = "http://127.0.0.1:38001/metrics" - require.True(t, pulsar.Init()) + require.NoError(t, pulsar.Init()) return pulsar, srv } diff --git a/modules/rabbitmq/config_schema.json b/modules/rabbitmq/config_schema.json index ad9f0e7b0..b1611bab2 100644 --- a/modules/rabbitmq/config_schema.json +++ b/modules/rabbitmq/config_schema.json @@ -1,62 +1,250 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/rabbitmq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "RabbitMQ collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "url": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the RabbitMQ management API.", + "type": "string", + "default": "https://127.0.0.1" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in 
seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + }, + "collect_queues_metrics": { + "title": "Collect Queues Metrics", + "description": "Collect stats for each queue of each virtual host. Enabling this can introduce serious overhead on both Netdata and RabbitMQ if many queues are configured and used.", + "type": "boolean", + "default": false + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "collect_queues_metrics": { - "type": "boolean" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "username": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" + "ui:widget": "password" }, "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/rabbitmq/rabbitmq.go b/modules/rabbitmq/rabbitmq.go index 59fe4b153..f7611faed 100644 --- a/modules/rabbitmq/rabbitmq.go +++ b/modules/rabbitmq/rabbitmq.go @@ -4,6 +4,7 @@ package rabbitmq import ( _ "embed" + "errors" "net/http" "time" @@ -31,7 +32,7 @@ func New() *RabbitMQ { Password: "guest", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, CollectQueues: false, @@ -43,14 +44,16 @@ func New() *RabbitMQ { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` CollectQueues bool `yaml:"collect_queues_metrics"` } type ( RabbitMQ struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts @@ -66,27 +69,39 @@ type ( } ) -func (r *RabbitMQ) Init() bool { +func (r *RabbitMQ) Configuration() any { + return r.Config +} + +func (r *RabbitMQ) Init() error { if r.URL == "" { r.Error("'url' can not be empty") - return false + return errors.New("url not set") } client, err := web.NewHTTPClient(r.Client) if err != nil { r.Errorf("init HTTP client: %v", err) - return false + return err } r.httpClient = client r.Debugf("using URL %s", r.URL) - r.Debugf("using timeout: %s", r.Timeout.Duration) + r.Debugf("using timeout: %s", r.Timeout) - return true + return nil } -func (r *RabbitMQ) Check() bool { - return len(r.Collect()) > 0 +func (r *RabbitMQ) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *RabbitMQ) Charts() *module.Charts { diff --git a/modules/rabbitmq/rabbitmq_test.go b/modules/rabbitmq/rabbitmq_test.go index c365726aa..caed7c126 100644 --- a/modules/rabbitmq/rabbitmq_test.go +++ b/modules/rabbitmq/rabbitmq_test.go @@ -58,9 +58,9 @@ func TestRabbitMQ_Init(t *testing.T) { rabbit.Config = test.config if test.wantFail { - assert.False(t, rabbit.Init()) + assert.Error(t, rabbit.Init()) } else { - assert.True(t, rabbit.Init()) + assert.NoError(t, rabbit.Init()) } }) } @@ -74,7 +74,7 @@ func TestRabbitMQ_Cleanup(t *testing.T) { assert.NotPanics(t, New().Cleanup) rabbit := New() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) assert.NotPanics(t, rabbit.Cleanup) } @@ -94,12 +94,12 @@ func TestRabbitMQ_Check(t *testing.T) { rabbit, cleanup := test.prepare() defer cleanup() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) if test.wantFail { - assert.False(t, rabbit.Check()) + assert.Error(t, rabbit.Check()) } else { - assert.True(t, rabbit.Check()) + assert.NoError(t, rabbit.Check()) } }) } @@ -285,7 +285,7 @@ func TestRabbitMQ_Collect(t *testing.T) { rabbit, cleanup := test.prepare() defer cleanup() - require.True(t, rabbit.Init()) + require.NoError(t, rabbit.Init()) mx := rabbit.Collect() diff --git 
a/modules/redis/config_schema.json b/modules/redis/config_schema.json index ed25da9de..35ed01d6f 100644 --- a/modules/redis/config_schema.json +++ b/modules/redis/config_schema.json @@ -1,44 +1,51 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/redis job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "password": { - "type": "string" - }, - "username": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "ping_samples": { - "type": "integer" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/redis job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "password": { + "type": "string" + }, + "username": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "ping_samples": { + "type": "integer" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/redis/init.go b/modules/redis/init.go index ffed274c3..072febb17 100644 --- a/modules/redis/init.go +++ b/modules/redis/init.go @@ -42,9 +42,9 @@ func (r *Redis) initRedisClient() (*redis.Client, error) { opts.PoolSize = 1 opts.TLSConfig = tlsConfig - opts.DialTimeout = r.Timeout.Duration - opts.ReadTimeout = r.Timeout.Duration - opts.WriteTimeout = r.Timeout.Duration + opts.DialTimeout = r.Timeout.Duration() + opts.ReadTimeout = r.Timeout.Duration() + opts.WriteTimeout = r.Timeout.Duration() return redis.NewClient(opts), nil } diff --git a/modules/redis/redis.go b/modules/redis/redis.go index 2117cc2ce..96be5e303 100644 --- a/modules/redis/redis.go +++ b/modules/redis/redis.go @@ -5,6 +5,7 @@ package redis import ( "context" _ "embed" + "errors" "sync" "time" @@ -31,7 +32,7 @@ func New() *Redis { return &Redis{ Config: Config{ Address: "redis://@localhost:6379", - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), PingSamples: 5, }, @@ -79,32 +80,44 @@ type ( } ) -func (r *Redis) Init() bool { +func (r *Redis) Configuration() any { + return r.Config +} + +func (r *Redis) Init() error { err := r.validateConfig() if err != nil { r.Errorf("config validation: %v", err) - return false + return err } rdb, err := r.initRedisClient() if err != nil { r.Errorf("init redis client: %v", err) - return false + return err } r.rdb = rdb charts, err := r.initCharts() if err != nil { r.Errorf("init charts: %v", err) - return false + return err } r.charts = charts - return true + return nil } -func (r *Redis) Check() bool { - return len(r.Collect()) > 0 +func (r *Redis) Check() error { + mx, err := r.collect() + if err != nil { + r.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (r *Redis) Charts() *module.Charts { diff --git a/modules/redis/redis_test.go b/modules/redis/redis_test.go index 9ee2f54f0..d9a35ad57 100644 --- a/modules/redis/redis_test.go +++ b/modules/redis/redis_test.go @@ -65,9 +65,9 @@ 
func TestRedis_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -96,9 +96,9 @@ func TestRedis_Check(t *testing.T) { rdb := test.prepare(t) if test.wantFail { - assert.False(t, rdb.Check()) + assert.Error(t, rdb.Check()) } else { - assert.True(t, rdb.Check()) + assert.NoError(t, rdb.Check()) } }) } @@ -106,7 +106,7 @@ func TestRedis_Check(t *testing.T) { func TestRedis_Charts(t *testing.T) { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) assert.NotNil(t, rdb.Charts()) } @@ -115,7 +115,7 @@ func TestRedis_Cleanup(t *testing.T) { rdb := New() assert.NotPanics(t, rdb.Cleanup) - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) m := &mockRedisClient{} rdb.rdb = m @@ -308,7 +308,7 @@ func TestRedis_Collect(t *testing.T) { func prepareRedisV609(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ result: v609InfoAll, } @@ -317,7 +317,7 @@ func prepareRedisV609(t *testing.T) *Redis { func prepareRedisErrorOnInfo(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ errOnInfo: true, } @@ -326,7 +326,7 @@ func prepareRedisErrorOnInfo(t *testing.T) *Redis { func prepareRedisWithPikaMetrics(t *testing.T) *Redis { rdb := New() - require.True(t, rdb.Init()) + require.NoError(t, rdb.Init()) rdb.rdb = &mockRedisClient{ result: pikaInfoAll, } diff --git a/modules/scaleio/collect_sdc.go b/modules/scaleio/collect_sdc.go index 495b1a031..be05f5c33 100644 --- a/modules/scaleio/collect_sdc.go +++ b/modules/scaleio/collect_sdc.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (s ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics { +func (s *ScaleIO) collectSdc(ss map[string]client.SdcStatistics) map[string]sdcMetrics { ms := make(map[string]sdcMetrics, len(ss)) for id, stats := range ss { diff --git a/modules/scaleio/collect_storage_pool.go b/modules/scaleio/collect_storage_pool.go index 7a41b66bd..dcaf01950 100644 --- a/modules/scaleio/collect_storage_pool.go +++ b/modules/scaleio/collect_storage_pool.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (s ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics { +func (s *ScaleIO) collectStoragePool(ss map[string]client.StoragePoolStatistics) map[string]storagePoolMetrics { ms := make(map[string]storagePoolMetrics, len(ss)) for id, stats := range ss { diff --git a/modules/scaleio/collect_system.go b/modules/scaleio/collect_system.go index 6806e1969..e28fcee6c 100644 --- a/modules/scaleio/collect_system.go +++ b/modules/scaleio/collect_system.go @@ -4,7 +4,7 @@ package scaleio import "github.com/netdata/go.d.plugin/modules/scaleio/client" -func (ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics { +func (s *ScaleIO) collectSystem(ss client.SystemStatistics) systemMetrics { var sm systemMetrics collectSystemCapacity(&sm, ss) collectSystemWorkload(&sm, ss) diff --git a/modules/scaleio/config_schema.json b/modules/scaleio/config_schema.json index 66230acc9..6b814cb4a 100644 --- a/modules/scaleio/config_schema.json +++ b/modules/scaleio/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - 
"title": "go.d/scaleio job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "ScaleIO collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the VxFlex OS Gateway API.", + "type": "string", + "default": "http://127.0.0.1/stub_status" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy 
authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/scaleio/scaleio.go b/modules/scaleio/scaleio.go index 05bb03c5b..1795ff276 100644 --- a/modules/scaleio/scaleio.go +++ b/modules/scaleio/scaleio.go @@ -4,6 +4,7 @@ package scaleio import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/modules/scaleio/client" @@ -24,32 +25,34 @@ func init() { // New creates ScaleIO with default values. func New() *ScaleIO { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "https://127.0.0.1", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &ScaleIO{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "https://127.0.0.1", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - return &ScaleIO{ - Config: config, charts: systemCharts.Copy(), charted: make(map[string]bool), } } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` +} + type ( - // Config is the ScaleIO module configuration. - Config struct { - web.HTTP `yaml:",inline"` - } // ScaleIO ScaleIO module. ScaleIO struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + client *client.Client charts *module.Charts @@ -65,32 +68,45 @@ type ( } ) +func (s *ScaleIO) Configuration() any { + return s.Config +} + // Init makes initialization. 
-func (s *ScaleIO) Init() bool { +func (s *ScaleIO) Init() error { if s.Username == "" || s.Password == "" { s.Error("username and password aren't set") - return false + return errors.New("username and password aren't set") } c, err := client.New(s.Client, s.Request) if err != nil { s.Errorf("error on creating ScaleIO client: %v", err) - return false + return err } s.client = c s.Debugf("using URL %s", s.URL) - s.Debugf("using timeout: %s", s.Timeout.Duration) - return true + s.Debugf("using timeout: %s", s.Timeout) + + return nil } // Check makes check. -func (s *ScaleIO) Check() bool { +func (s *ScaleIO) Check() error { if err := s.client.Login(); err != nil { s.Error(err) - return false + return err + } + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") } - return len(s.Collect()) > 0 + return nil } // Charts returns Charts. diff --git a/modules/scaleio/scaleio_test.go b/modules/scaleio/scaleio_test.go index 5547b174b..3443bd518 100644 --- a/modules/scaleio/scaleio_test.go +++ b/modules/scaleio/scaleio_test.go @@ -34,10 +34,10 @@ func TestScaleIO_Init(t *testing.T) { scaleIO.Username = "username" scaleIO.Password = "password" - assert.True(t, scaleIO.Init()) + assert.NoError(t, scaleIO.Init()) } func TestScaleIO_Init_UsernameAndPasswordNotSet(t *testing.T) { - assert.False(t, New().Init()) + assert.Error(t, New().Init()) } func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { @@ -46,24 +46,24 @@ func TestScaleIO_Init_ErrorOnCreatingClientWrongTLSCA(t *testing.T) { job.Password = "password" job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestScaleIO_Check(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) + require.NoError(t, scaleIO.Init()) - assert.True(t, scaleIO.Check()) + assert.NoError(t, scaleIO.Check()) } func TestScaleIO_Check_ErrorOnLogin(t *testing.T) { srv, mock, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) + require.NoError(t, scaleIO.Init()) mock.Password = "new password" - assert.False(t, scaleIO.Check()) + assert.Error(t, scaleIO.Check()) } func TestScaleIO_Charts(t *testing.T) { @@ -73,8 +73,8 @@ func TestScaleIO_Charts(t *testing.T) { func TestScaleIO_Cleanup(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) scaleIO.Cleanup() assert.False(t, scaleIO.client.LoggedIn()) @@ -83,8 +83,8 @@ func TestScaleIO_Cleanup(t *testing.T) { func TestScaleIO_Collect(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) expected := map[string]int64{ "sdc_6076fd0f00000000_bandwidth_read": 0, @@ -297,8 +297,8 @@ func TestScaleIO_Collect(t *testing.T) { func TestScaleIO_Collect_ConnectionRefused(t *testing.T) { srv, _, scaleIO := prepareSrvMockScaleIO(t) defer srv.Close() - require.True(t, scaleIO.Init()) - require.True(t, scaleIO.Check()) + require.NoError(t, scaleIO.Init()) + require.NoError(t, scaleIO.Check()) scaleIO.client.Request.URL = "http://127.0.0.1:38001" assert.Nil(t, scaleIO.Collect()) diff --git a/modules/snmp/config_schema.json b/modules/snmp/config_schema.json index 
dd4e9c3ca..094700544 100644 --- a/modules/snmp/config_schema.json +++ b/modules/snmp/config_schema.json @@ -1,188 +1,195 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "update_every": { - "type": "integer" - }, - "hostname": { - "type": "string" - }, - "community": { - "type": "string" - }, - "user": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "level": { - "type": "string", - "enum": [ - "none", - "authNoPriv", - "authPriv" - ] - }, - "auth_proto": { - "type": "string", - "enum": [ - "none", - "md5", - "sha", - "sha224", - "sha256", - "sha384", - "sha512" - ] - }, - "auth_key": { - "type": "string" - }, - "priv_proto": { - "type": "string", - "enum": [ - "none", - "des", - "aes", - "aes192", - "aes256", - "aes192c" - ] - }, - "priv_key": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "name": { + "type": "string" }, - "required": [ - "name", - "level", - "auth_proto", - "auth_key", - "priv_proto", - "priv_key" - ] - }, - "options": { - "type": "object", - "properties": { - "port": { - "type": "integer" - }, - "retries": { - "type": "integer" - }, - "timeout": { - "type": "integer" - }, - "version": { - "type": "string", - "enum": [ - "1", - "2", - "3" - ] - }, - "max_request_size": { - "type": "integer" - } + "update_every": { + "type": "integer" }, - "required": [ - "port", - "retries", - "timeout", - "version", - "max_request_size" - ] - }, - "charts": { - "type": "array", - "items": { + "hostname": { + "type": "string" + }, + "community": { + "type": "string" + }, + "user": { "type": "object", "properties": { - "id": { + "name": { "type": "string" }, - "title": { - "type": "string" + "level": { + "type": "string", + "enum": [ + "none", + "authNoPriv", + "authPriv" + ] }, - "units": { - "type": "string" + "auth_proto": { + "type": "string", + "enum": [ + "none", + "md5", + "sha", + "sha224", + "sha256", + "sha384", + "sha512" + ] }, - "family": { + "auth_key": { "type": "string" }, - "type": { + "priv_proto": { + "type": "string", + "enum": [ + "none", + "des", + "aes", + "aes192", + "aes256", + "aes192c" + ] + }, + "priv_key": { "type": "string" + } + }, + "required": [ + "name", + "level", + "auth_proto", + "auth_key", + "priv_proto", + "priv_key" + ] + }, + "options": { + "type": "object", + "properties": { + "port": { + "type": "integer" }, - "priority": { + "retries": { "type": "integer" }, - "multiply_range": { - "type": "array", - "items": { - "type": "integer" - } + "timeout": { + "type": "integer" }, - "dimensions": { - "type": "array", - "items": { - "type": "object", - "properties": { - "oid": { - "type": "string" - }, - "name": { - "type": "string" - }, - "algorithm": { - "type": "string", - "enum": [ - "absolute", - "incremental" - ] - }, - "multiplier": { - "type": "integer" - }, - "divisor": { - "type": "integer" - } - }, - "required": [ - "oid", - "name", - "algorithm", - "multiplier", - "divisor" - ] - } + "version": { + "type": "string", + "enum": [ + "1", + "2", + "3" + ] + }, + "max_request_size": { + "type": "integer" } }, "required": [ - "id", - "title", - "units", - "family", - "type", - "priority", - "multiply_range", - "dimensions" + "port", + "retries", + "timeout", + "version", + "max_request_size" ] + }, + "charts": { + "type": "array", + "items": { + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "title": { + "type": 
"string" + }, + "units": { + "type": "string" + }, + "family": { + "type": "string" + }, + "type": { + "type": "string" + }, + "priority": { + "type": "integer" + }, + "multiply_range": { + "type": "array", + "items": { + "type": "integer" + } + }, + "dimensions": { + "type": "array", + "items": { + "type": "object", + "properties": { + "oid": { + "type": "string" + }, + "name": { + "type": "string" + }, + "algorithm": { + "type": "string", + "enum": [ + "absolute", + "incremental" + ] + }, + "multiplier": { + "type": "integer" + }, + "divisor": { + "type": "integer" + } + }, + "required": [ + "oid", + "name", + "algorithm", + "multiplier", + "divisor" + ] + } + } + }, + "required": [ + "id", + "title", + "units", + "family", + "type", + "priority", + "multiply_range", + "dimensions" + ] + } } - } + }, + "required": [ + "name", + "update_every", + "hostname", + "community", + "user", + "options", + "charts" + ] }, - "required": [ - "name", - "update_every", - "hostname", - "community", - "user", - "options", - "charts" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/snmp/init.go b/modules/snmp/init.go index 802430936..5802d6682 100644 --- a/modules/snmp/init.go +++ b/modules/snmp/init.go @@ -12,7 +12,7 @@ import ( var newSNMPClient = gosnmp.NewHandler -func (s SNMP) validateConfig() error { +func (s *SNMP) validateConfig() error { if len(s.ChartsInput) == 0 { return errors.New("'charts' are required but not set") } @@ -35,7 +35,7 @@ func (s SNMP) validateConfig() error { return nil } -func (s SNMP) initSNMPClient() (gosnmp.Handler, error) { +func (s *SNMP) initSNMPClient() (gosnmp.Handler, error) { client := newSNMPClient() if client.SetTarget(s.Hostname); client.Target() == "" { @@ -96,7 +96,7 @@ func (s SNMP) initSNMPClient() (gosnmp.Handler, error) { return client, nil } -func (s SNMP) initOIDs() (oids []string) { +func (s *SNMP) initOIDs() (oids []string) { for _, c := range *s.charts { for _, d := range c.Dims { oids = append(oids, d.ID) diff --git a/modules/snmp/snmp.go b/modules/snmp/snmp.go index 7aa933f64..102b73b64 100644 --- a/modules/snmp/snmp.go +++ b/modules/snmp/snmp.go @@ -4,6 +4,7 @@ package snmp import ( _ "embed" + "errors" "fmt" "strings" @@ -104,17 +105,21 @@ type SNMP struct { oids []string } -func (s *SNMP) Init() bool { +func (s *SNMP) Configuration() any { + return s.Config +} + +func (s *SNMP) Init() error { err := s.validateConfig() if err != nil { s.Errorf("config validation: %v", err) - return false + return err } snmpClient, err := s.initSNMPClient() if err != nil { s.Errorf("SNMP client initialization: %v", err) - return false + return err } s.Info(snmpClientConnInfo(snmpClient)) @@ -122,24 +127,32 @@ func (s *SNMP) Init() bool { err = snmpClient.Connect() if err != nil { s.Errorf("SNMP client connect: %v", err) - return false + return err } s.snmpClient = snmpClient charts, err := newCharts(s.ChartsInput) if err != nil { s.Errorf("Population of charts failed: %v", err) - return false + return err } s.charts = charts s.oids = s.initOIDs() - return true + return nil } -func (s *SNMP) Check() bool { - return len(s.Collect()) > 0 +func (s *SNMP) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *SNMP) Charts() *module.Charts { diff --git a/modules/snmp/snmp_test.go b/modules/snmp/snmp_test.go index 9f1ef0e90..f888a7e5f 100644 --- a/modules/snmp/snmp_test.go +++ b/modules/snmp/snmp_test.go @@ 
-107,9 +107,9 @@ func TestSNMP_Init(t *testing.T) { snmp := test.prepareSNMP() if test.wantFail { - assert.False(t, snmp.Init()) + assert.Error(t, snmp.Init()) } else { - assert.True(t, snmp.Init()) + assert.NoError(t, snmp.Init()) } }) } @@ -209,12 +209,12 @@ func TestSNMP_Check(t *testing.T) { defaultMockExpects(mockSNMP) snmp := test.prepareSNMP(mockSNMP) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) if test.wantFail { - assert.False(t, snmp.Check()) + assert.Error(t, snmp.Check()) } else { - assert.True(t, snmp.Check()) + assert.NoError(t, snmp.Check()) } }) } @@ -311,7 +311,7 @@ func TestSNMP_Collect(t *testing.T) { defaultMockExpects(mockSNMP) snmp := test.prepareSNMP(mockSNMP) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) collected := snmp.Collect() @@ -328,7 +328,7 @@ func TestSNMP_Cleanup(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) m.EXPECT().Close().Times(1) @@ -339,7 +339,7 @@ func TestSNMP_Cleanup(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) snmp.snmpClient = nil return snmp @@ -371,7 +371,7 @@ func TestSNMP_Charts(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareV2Config() - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) return snmp }, @@ -381,7 +381,7 @@ func TestSNMP_Charts(t *testing.T) { prepareSNMP: func(t *testing.T, m *snmpmock.MockHandler) *SNMP { snmp := New() snmp.Config = prepareConfigWithIndexRange(prepareV2Config, 0, 9) - require.True(t, snmp.Init()) + require.NoError(t, snmp.Init()) return snmp }, diff --git a/modules/solr/README.md b/modules/solr/README.md deleted file mode 120000 index 0bca1b31a..000000000 --- a/modules/solr/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/solr.md \ No newline at end of file diff --git a/modules/solr/charts.go b/modules/solr/charts.go deleted file mode 100644 index caaa72489..000000000 --- a/modules/solr/charts.go +++ /dev/null @@ -1,141 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package solr - -import ( - "github.com/netdata/go.d.plugin/agent/module" -) - -type ( - // Charts is an alias for module.Charts - Charts = module.Charts - // Dims is an alias for module.Dims - Dims = module.Dims -) - -var charts = Charts{ - { - ID: "search_requests", - Title: "Search Requests", - Units: "requests/s", - Ctx: "solr.search_requests", - Dims: Dims{ - {ID: "query_requests_count", Name: "search", Algo: module.Incremental}, - }, - }, - { - ID: "search_errors", - Title: "Search Errors", - Units: "errors/s", - Ctx: "solr.search_errors", - Dims: Dims{ - {ID: "query_errors_count", Name: "errors", Algo: module.Incremental}, - }, - }, - { - ID: "search_errors_by_type", - Title: "Search Errors By Type", - Units: "errors/s", - Ctx: "solr.search_errors_by_type", - Dims: Dims{ - {ID: "query_clientErrors_count", Name: "client", Algo: module.Incremental}, - {ID: "query_serverErrors_count", Name: "server", Algo: module.Incremental}, - {ID: "query_timeouts_count", Name: "timeouts", Algo: module.Incremental}, - }, - }, - { - ID: "search_requests_processing_time", - Title: "Search Requests Processing Time", - Units: "milliseconds", - Ctx: "solr.search_requests_processing_time", - Dims: Dims{ - {ID: "query_totalTime_count", Name: 
"time", Algo: module.Incremental}, - }, - }, - { - ID: "search_requests_timings", - Title: "Search Requests Timings", - Units: "milliseconds", - Ctx: "solr.search_requests_timings", - Dims: Dims{ - {ID: "query_requestTimes_min_ms", Name: "min", Div: 1000000}, - {ID: "query_requestTimes_median_ms", Name: "median", Div: 1000000}, - {ID: "query_requestTimes_mean_ms", Name: "mean", Div: 1000000}, - {ID: "query_requestTimes_max_ms", Name: "max", Div: 1000000}, - }, - }, - { - ID: "search_requests_processing_time_percentile", - Title: "Search Requests Processing Time Percentile", - Units: "milliseconds", - Ctx: "solr.search_requests_processing_time_percentile", - Dims: Dims{ - {ID: "query_requestTimes_p75_ms", Name: "p75", Div: 1000000}, - {ID: "query_requestTimes_p95_ms", Name: "p95", Div: 1000000}, - {ID: "query_requestTimes_p99_ms", Name: "p99", Div: 1000000}, - {ID: "query_requestTimes_p999_ms", Name: "p999", Div: 1000000}, - }, - }, - { - ID: "update_requests", - Title: "Update Requests", - Units: "requests/s", - Ctx: "solr.update_requests", - Dims: Dims{ - {ID: "update_requests_count", Name: "update", Algo: module.Incremental}, - }, - }, - { - ID: "update_errors", - Title: "Update Errors", - Units: "errors/s", - Ctx: "solr.update_errors", - Dims: Dims{ - {ID: "update_errors_count", Name: "errors", Algo: module.Incremental}, - }, - }, - { - ID: "update_errors_by_type", - Title: "Update Errors By Type", - Units: "errors/s", - Ctx: "solr.update_errors_by_type", - Dims: Dims{ - {ID: "update_clientErrors_count", Name: "client", Algo: module.Incremental}, - {ID: "update_serverErrors_count", Name: "server", Algo: module.Incremental}, - {ID: "update_timeouts_count", Name: "timeouts", Algo: module.Incremental}, - }, - }, - { - ID: "update_requests_processing_time", - Title: "Update Requests Processing Time", - Units: "milliseconds", - Ctx: "solr.update_requests_processing_time", - Dims: Dims{ - {ID: "update_totalTime_count", Name: "time", Algo: module.Incremental}, - }, - }, - { - ID: "update_requests_timings", - Title: "Update Requests Timings", - Units: "milliseconds", - Ctx: "solr.update_requests_timings", - Dims: Dims{ - {ID: "update_requestTimes_min_ms", Name: "min", Div: 1000000}, - {ID: "update_requestTimes_median_ms", Name: "median", Div: 1000000}, - {ID: "update_requestTimes_mean_ms", Name: "mean", Div: 1000000}, - {ID: "update_requestTimes_max_ms", Name: "max", Div: 1000000}, - }, - }, - { - ID: "update_requests_processing_time_percentile", - Title: "Update Requests Processing Time Percentile", - Units: "milliseconds", - Ctx: "solr.update_requests_processing_time_percentile", - Dims: Dims{ - {ID: "update_requestTimes_p75_ms", Name: "p75", Div: 1000000}, - {ID: "update_requestTimes_p95_ms", Name: "p95", Div: 1000000}, - {ID: "update_requestTimes_p99_ms", Name: "p99", Div: 1000000}, - {ID: "update_requestTimes_p999_ms", Name: "p999", Div: 1000000}, - }, - }, -} diff --git a/modules/solr/config_schema.json b/modules/solr/config_schema.json deleted file mode 100644 index 66dde58bf..000000000 --- a/modules/solr/config_schema.json +++ /dev/null @@ -1,59 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/solr job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - 
}, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } - }, - "required": [ - "name", - "url" - ] -} diff --git a/modules/solr/integrations/solr.md b/modules/solr/integrations/solr.md deleted file mode 100644 index 9afebfd17..000000000 --- a/modules/solr/integrations/solr.md +++ /dev/null @@ -1,223 +0,0 @@ - - -# Solr - - - - - -Plugin: go.d.plugin -Module: solr - - - -## Overview - -This collector monitors Solr instances. - - - - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -This integration doesn't support auto-detection. - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Solr instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| solr.search_requests | search | requests/s | -| solr.search_errors | errors | errors/s | -| solr.search_errors_by_type | client, server, timeouts | errors/s | -| solr.search_requests_processing_time | time | milliseconds | -| solr.search_requests_timings | min, median, mean, max | milliseconds | -| solr.search_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds | -| solr.update_requests | search | requests/s | -| solr.update_errors | errors | errors/s | -| solr.update_errors_by_type | client, server, timeouts | errors/s | -| solr.update_requests_processing_time | time | milliseconds | -| solr.update_requests_timings | min, median, mean, max | milliseconds | -| solr.update_requests_processing_time_percentile | p75, p95, p99, p999 | milliseconds | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Solr version 6.4+ - -This collector does not work with Solr versions lower 6.4. - - - -### Configuration - -#### File - -The configuration file name for this integration is `go.d/solr.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config go.d/solr.conf -``` -#### Options - -The following options can be defined globally: update_every, autodetection_retry. - - -
All options - -| Name | Description | Default | Required | -|:----|:-----------|:-------|:--------:| -| update_every | Data collection frequency. | 1 | no | -| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no | -| url | Server URL. | http://127.0.0.1:8983 | yes | -| socket | Server Unix socket. | | no | -| address | Server address in IP:PORT format. | | no | -| fcgi_path | Status path. | /status | no | -| timeout | HTTP request timeout. | 1 | no | -| username | Username for basic HTTP authentication. | | no | -| password | Password for basic HTTP authentication. | | no | -| proxy_url | Proxy URL. | | no | -| proxy_username | Username for proxy basic HTTP authentication. | | no | -| proxy_password | Password for proxy basic HTTP authentication. | | no | -| method | HTTP request method. | GET | no | -| body | HTTP request body. | | no | -| headers | HTTP request headers. | | no | -| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no | -| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no | -| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no | -| tls_cert | Client TLS certificate. | | no | -| tls_key | Client TLS key. | | no | - -
- -#### Examples - -##### Basic - -An example configuration. - -
Config - -```yaml -jobs: - - name: local - url: http://localhost:8983 - -``` -
- -##### Basic HTTP auth - -Local Solr instance with basic HTTP authentication. - -
Config - -```yaml -jobs: - - name: local - url: http://localhost:8983 - username: foo - password: bar - -``` -
- -##### Multi-instance - -> **Note**: When you define multiple jobs, their names must be unique. - -Local and remote instances. - - -
Config - -```yaml -jobs: - - name: local - url: http://localhost:8983 - - - name: remote - url: http://203.0.113.10:8983 - -``` -
- - - -## Troubleshooting - -### Debug Mode - -To troubleshoot issues with the `solr` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m solr - ``` - - diff --git a/modules/solr/metadata.yaml b/modules/solr/metadata.yaml deleted file mode 100644 index 066744f63..000000000 --- a/modules/solr/metadata.yaml +++ /dev/null @@ -1,268 +0,0 @@ -plugin_name: go.d.plugin -modules: - - meta: - id: collector-go.d.plugin-solr - plugin_name: go.d.plugin - module_name: solr - monitored_instance: - name: Solr - link: https://lucene.apache.org/solr/ - icon_filename: solr.svg - categories: - - data-collection.search-engines - keywords: - - solr - related_resources: - integrations: - list: [] - info_provided_to_referring_integrations: - description: "" - most_popular: false - overview: - data_collection: - metrics_description: | - This collector monitors Solr instances. - method_description: "" - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: "" - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Solr version 6.4+ - description: | - This collector does not work with Solr versions lower 6.4. - configuration: - file: - name: go.d/solr.conf - options: - description: | - The following options can be defined globally: update_every, autodetection_retry. - folding: - title: All options - enabled: true - list: - - name: update_every - description: Data collection frequency. - default_value: 1 - required: false - - name: autodetection_retry - description: Recheck interval in seconds. Zero means no recheck will be scheduled. - default_value: 0 - required: false - - name: url - description: Server URL. - default_value: http://127.0.0.1:8983 - required: true - - name: socket - description: Server Unix socket. - default_value: "" - required: false - - name: address - description: Server address in IP:PORT format. - default_value: "" - required: false - - name: fcgi_path - description: Status path. - default_value: /status - required: false - - name: timeout - description: HTTP request timeout. - default_value: 1 - required: false - - name: username - description: Username for basic HTTP authentication. - default_value: "" - required: false - - name: password - description: Password for basic HTTP authentication. - default_value: "" - required: false - - name: proxy_url - description: Proxy URL. - default_value: "" - required: false - - name: proxy_username - description: Username for proxy basic HTTP authentication. - default_value: "" - required: false - - name: proxy_password - description: Password for proxy basic HTTP authentication. - default_value: "" - required: false - - name: method - description: HTTP request method. - default_value: GET - required: false - - name: body - description: HTTP request body. - default_value: "" - required: false - - name: headers - description: HTTP request headers. 
- default_value: "" - required: false - - name: not_follow_redirects - description: Redirect handling policy. Controls whether the client follows redirects. - default_value: false - required: false - - name: tls_skip_verify - description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. - default_value: false - required: false - - name: tls_ca - description: Certification authority that the client uses when verifying the server's certificates. - default_value: "" - required: false - - name: tls_cert - description: Client TLS certificate. - default_value: "" - required: false - - name: tls_key - description: Client TLS key. - default_value: "" - required: false - examples: - folding: - title: Config - enabled: true - list: - - name: Basic - description: An example configuration. - config: | - jobs: - - name: local - url: http://localhost:8983 - - name: Basic HTTP auth - description: Local Solr instance with basic HTTP authentication. - config: | - jobs: - - name: local - url: http://localhost:8983 - username: foo - password: bar - - name: Multi-instance - description: | - > **Note**: When you define multiple jobs, their names must be unique. - - Local and remote instances. - config: | - jobs: - - name: local - url: http://localhost:8983 - - - name: remote - url: http://203.0.113.10:8983 - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: These metrics refer to the entire monitored application. - labels: [] - metrics: - - name: solr.search_requests - description: Search Requests - unit: requests/s - chart_type: line - dimensions: - - name: search - - name: solr.search_errors - description: Search Errors - unit: errors/s - chart_type: line - dimensions: - - name: errors - - name: solr.search_errors_by_type - description: Search Errors By Type - unit: errors/s - chart_type: line - dimensions: - - name: client - - name: server - - name: timeouts - - name: solr.search_requests_processing_time - description: Search Requests Processing Time - unit: milliseconds - chart_type: line - dimensions: - - name: time - - name: solr.search_requests_timings - description: Search Requests Timings - unit: milliseconds - chart_type: line - dimensions: - - name: min - - name: median - - name: mean - - name: max - - name: solr.search_requests_processing_time_percentile - description: Search Requests Processing Time Percentile - unit: milliseconds - chart_type: line - dimensions: - - name: p75 - - name: p95 - - name: p99 - - name: p999 - - name: solr.update_requests - description: Update Requests - unit: requests/s - chart_type: line - dimensions: - - name: search - - name: solr.update_errors - description: Update Errors - unit: errors/s - chart_type: line - dimensions: - - name: errors - - name: solr.update_errors_by_type - description: Update Errors By Type - unit: errors/s - chart_type: line - dimensions: - - name: client - - name: server - - name: timeouts - - name: solr.update_requests_processing_time - description: Update Requests Processing Time - unit: milliseconds - chart_type: line - dimensions: - - name: time - - name: solr.update_requests_timings - description: Update Requests Timings - unit: milliseconds - chart_type: line - dimensions: - - name: min - - name: median - - name: mean - - name: max - - name: solr.update_requests_processing_time_percentile - description: Update Requests Processing Time Percentile 
- unit: milliseconds - chart_type: line - dimensions: - - name: p75 - - name: p95 - - name: p99 - - name: p999 diff --git a/modules/solr/parser.go b/modules/solr/parser.go deleted file mode 100644 index c8a9eaa54..000000000 --- a/modules/solr/parser.go +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package solr - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "strings" -) - -type count struct { - Count int64 -} - -type common struct { - Count int64 - MeanRate float64 `json:"meanRate"` - MinRate1min float64 `json:"1minRate"` - MinRate5min float64 `json:"5minRate"` - MinRate15min float64 `json:"15minRate"` -} - -type requestTimes struct { - Count int64 - MeanRate float64 `json:"meanRate"` - MinRate1min float64 `json:"1minRate"` - MinRate5min float64 `json:"5minRate"` - MinRate15min float64 `json:"15minRate"` - MinMS float64 `json:"min_ms"` - MaxMS float64 `json:"max_ms"` - MeanMS float64 `json:"mean_ms"` - MedianMS float64 `json:"median_ms"` - StdDevMS float64 `json:"stddev_ms"` - P75MS float64 `json:"p75_ms"` - P95MS float64 `json:"p95_ms"` - P99MS float64 `json:"p99_ms"` - P999MS float64 `json:"p999_ms"` -} - -type coresMetrics struct { - Metrics map[string]map[string]json.RawMessage -} - -func (s *Solr) parse(resp *http.Response) (map[string]int64, error) { - var cm coresMetrics - var metrics = make(map[string]int64) - - if err := json.NewDecoder(resp.Body).Decode(&cm); err != nil { - return nil, err - } - - if len(cm.Metrics) == 0 { - return nil, errors.New("unparsable data") - } - - for core, data := range cm.Metrics { - coreName := core[10:] - - if !s.cores[coreName] { - s.addCoreCharts(coreName) - s.cores[coreName] = true - } - - if err := s.parseCore(coreName, data, metrics); err != nil { - return nil, err - } - } - - return metrics, nil -} - -func (s *Solr) parseCore(core string, data map[string]json.RawMessage, metrics map[string]int64) error { - var ( - simpleCount int64 - count count - common common - requestTimes requestTimes - ) - - for metric, stats := range data { - parts := strings.Split(metric, ".") - - if len(parts) != 3 { - continue - } - - typ, handler, stat := strings.ToLower(parts[0]), parts[1], parts[2] - - if handler == "updateHandler" { - // TODO: - continue - } - - switch stat { - case "clientErrors", "errors", "serverErrors", "timeouts": - if err := json.Unmarshal(stats, &common); err != nil { - return err - } - metrics[format("%s_%s_%s_count", core, typ, stat)] += common.Count - case "requests", "totalTime": - var c int64 - if s.version < 7.0 { - if err := json.Unmarshal(stats, &count); err != nil { - return err - } - c = count.Count - } else { - if err := json.Unmarshal(stats, &simpleCount); err != nil { - return err - } - c = simpleCount - } - metrics[format("%s_%s_%s_count", core, typ, stat)] += c - case "requestTimes": - if err := json.Unmarshal(stats, &requestTimes); err != nil { - return err - } - metrics[format("%s_%s_%s_count", core, typ, stat)] += requestTimes.Count - metrics[format("%s_%s_%s_min_ms", core, typ, stat)] += int64(requestTimes.MinMS * 1e6) - metrics[format("%s_%s_%s_mean_ms", core, typ, stat)] += int64(requestTimes.MeanMS * 1e6) - metrics[format("%s_%s_%s_median_ms", core, typ, stat)] += int64(requestTimes.MedianMS * 1e6) - metrics[format("%s_%s_%s_max_ms", core, typ, stat)] += int64(requestTimes.MaxMS * 1e6) - metrics[format("%s_%s_%s_p75_ms", core, typ, stat)] += int64(requestTimes.P75MS * 1e6) - metrics[format("%s_%s_%s_p95_ms", core, typ, stat)] += int64(requestTimes.P95MS * 1e6) - 
metrics[format("%s_%s_%s_p99_ms", core, typ, stat)] += int64(requestTimes.P99MS * 1e6) - metrics[format("%s_%s_%s_p999_ms", core, typ, stat)] += int64(requestTimes.P999MS * 1e6) - } - } - - return nil -} - -func (s *Solr) addCoreCharts(core string) { - charts := charts.Copy() - - for _, chart := range *charts { - chart.ID = format("%s_%s", core, chart.ID) - chart.Fam = format("core %s", core) - - for _, dim := range chart.Dims { - dim.ID = format("%s_%s", core, dim.ID) - } - } - - _ = s.charts.Add(*charts...) - -} - -var format = fmt.Sprintf diff --git a/modules/solr/solr.go b/modules/solr/solr.go deleted file mode 100644 index 57f2d7083..000000000 --- a/modules/solr/solr.go +++ /dev/null @@ -1,212 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package solr - -import ( - _ "embed" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -//go:embed "config_schema.json" -var configSchema string - -func init() { - module.Register("solr", module.Creator{ - JobConfigSchema: configSchema, - Create: func() module.Module { return New() }, - }) -} - -const ( - defaultURL = "http://127.0.0.1:8983" - defaultHTTPTimeout = time.Second -) - -const ( - minSupportedVersion = 6.4 - coresHandlersURLPath = "/solr/admin/metrics" - coresHandlersURLQuery = "group=core&prefix=UPDATE,QUERY&wt=json" - infoSystemURLPath = "/solr/admin/info/system" - infoSystemURLQuery = "wt=json" -) - -type infoSystem struct { - Lucene struct { - Version string `json:"solr-spec-version"` - } -} - -// New creates Solr with default values -func New() *Solr { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, - }, - }, - } - return &Solr{ - Config: config, - cores: make(map[string]bool), - } -} - -// Config is the Solr module configuration. 
-type Config struct { - web.HTTP `yaml:",inline"` -} - -// Solr solr module -type Solr struct { - module.Base - Config `yaml:",inline"` - - cores map[string]bool - client *http.Client - version float64 - charts *Charts -} - -func (s *Solr) doRequest(req *http.Request) (*http.Response, error) { - return s.client.Do(req) -} - -// Cleanup makes cleanup -func (Solr) Cleanup() {} - -// Init makes initialization -func (s *Solr) Init() bool { - if s.URL == "" { - s.Error("URL not set") - return false - } - - client, err := web.NewHTTPClient(s.Client) - if err != nil { - s.Error(err) - return false - } - - s.client = client - return true -} - -// Check makes check -func (s *Solr) Check() bool { - if err := s.getVersion(); err != nil { - s.Error(err) - return false - } - - if s.version < minSupportedVersion { - s.Errorf("unsupported Solr version : %.1f", s.version) - return false - } - - return true -} - -// Charts creates Charts -func (s *Solr) Charts() *Charts { - s.charts = &Charts{} - - return s.charts -} - -// Collect collects metrics -func (s *Solr) Collect() map[string]int64 { - req, err := createRequest(s.Request, coresHandlersURLPath, coresHandlersURLQuery) - if err != nil { - s.Errorf("error on creating http request : %v", err) - return nil - } - - resp, err := s.doRequest(req) - if err != nil { - s.Errorf("error on request to %s : %s", req.URL, err) - return nil - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - s.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - return nil - } - - metrics, err := s.parse(resp) - if err != nil { - s.Errorf("error on parse response from %s : %s", req.URL, err) - return nil - } - - return metrics -} - -func (s *Solr) getVersion() error { - req, err := createRequest(s.Request, infoSystemURLPath, infoSystemURLQuery) - if err != nil { - return fmt.Errorf("error on creating http request : %v", err) - } - - resp, err := s.doRequest(req) - if err != nil { - return fmt.Errorf("error on request to %s : %s", req.URL, err) - } - defer closeBody(resp) - - if resp.StatusCode != http.StatusOK { - return fmt.Errorf("%s returned HTTP status %d", req.URL, resp.StatusCode) - } - - var info infoSystem - - if err := json.NewDecoder(resp.Body).Decode(&info); err != nil { - return fmt.Errorf("error on decode response from %s : %s", req.URL, err) - } - - var idx int - - if idx = strings.LastIndex(info.Lucene.Version, "."); idx == -1 { - return fmt.Errorf("error on parsing version '%s': bad format", info.Lucene.Version) - } - - if s.version, err = strconv.ParseFloat(info.Lucene.Version[:idx], 64); err != nil { - return fmt.Errorf("error on parsing version '%s' : %s", info.Lucene.Version, err) - } - - return nil -} - -func createRequest(req web.Request, urlPath, urlQuery string) (*http.Request, error) { - r := req.Copy() - u, err := url.Parse(r.URL) - if err != nil { - return nil, err - } - - u.Path = urlPath - u.RawQuery = urlQuery - r.URL = u.String() - return web.NewHTTPRequest(r) -} - -func closeBody(resp *http.Response) { - if resp != nil && resp.Body != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = resp.Body.Close() - } -} diff --git a/modules/solr/solr_test.go b/modules/solr/solr_test.go deleted file mode 100644 index f545adeb0..000000000 --- a/modules/solr/solr_test.go +++ /dev/null @@ -1,274 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package solr - -import ( - "fmt" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/netdata/go.d.plugin/agent/module" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -var ( - coreMetricsV6, _ = os.ReadFile("testdata/core-metrics-v6.txt") - coreMetricsV7, _ = os.ReadFile("testdata/core-metrics-v7.txt") -) - -func version(v string) string { - return format(`{ "lucene":{ "solr-spec-version":"%s"}}`, v) -} - -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Client.Timeout.Duration) -} - -func TestSolr_Init(t *testing.T) { - job := New() - - assert.True(t, job.Init()) - assert.NotNil(t, job.client) -} - -func TestSolr_Check(t *testing.T) { - job := New() - - ts := httptest.NewServer( - http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/solr/admin/info/system" { - _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion)))) - return - } - })) - - job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) -} - -func TestSolr_Check_UnsupportedVersion(t *testing.T) { - job := New() - - ts := httptest.NewServer( - http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/solr/admin/info/system" { - _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion-1)))) - return - } - })) - - job.URL = ts.URL - - require.True(t, job.Init()) - - assert.False(t, job.Check()) -} - -func TestSolr_Charts(t *testing.T) { - assert.NotNil(t, New().Charts()) -} - -func TestSolr_Cleanup(t *testing.T) { - New().Cleanup() -} - -func TestSolr_CollectV6(t *testing.T) { - job := New() - - ts := httptest.NewServer( - http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/solr/admin/info/system" { - _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion)))) - return - } - if r.URL.Path == "/solr/admin/metrics" { - _, _ = w.Write(coreMetricsV6) - return - } - })) - - job.URL = ts.URL - - require.True(t, job.Init()) - require.True(t, job.Check()) - require.NotNil(t, job.Charts()) - - expected := map[string]int64{ - "core2_query_requestTimes_min_ms": 0, - "core1_query_serverErrors_count": 3, - "core2_update_requestTimes_mean_ms": 0, - "core2_query_requestTimes_p99_ms": 297000000, - "core2_query_requestTimes_p999_ms": 2997000000, - "core1_update_requestTimes_p99_ms": 297000000, - "core2_update_requestTimes_p75_ms": 225000000, - "core2_update_requests_count": 3, - "core2_query_requestTimes_p75_ms": 225000000, - "core2_update_requestTimes_min_ms": 0, - "core2_query_clientErrors_count": 3, - "core2_query_requestTimes_count": 3, - "core2_query_requestTimes_median_ms": 0, - "core2_query_requestTimes_p95_ms": 285000000, - "core2_update_serverErrors_count": 3, - "core1_query_requestTimes_mean_ms": 0, - "core1_update_totalTime_count": 3, - "core1_update_errors_count": 3, - "core1_query_errors_count": 3, - "core1_query_timeouts_count": 3, - "core1_update_requestTimes_p95_ms": 285000000, - "core1_query_clientErrors_count": 3, - "core2_query_serverErrors_count": 3, - "core1_update_requestTimes_p75_ms": 225000000, - "core2_update_requestTimes_p99_ms": 297000000, - "core2_query_requests_count": 3, - "core2_update_clientErrors_count": 3, - "core1_update_requestTimes_min_ms": 0, - "core1_update_requestTimes_mean_ms": 0, - "core1_query_requestTimes_p95_ms": 285000000, - "core1_query_requestTimes_p999_ms": 2997000000, - "core1_update_serverErrors_count": 3, - "core1_query_requests_count": 3, - "core1_update_requestTimes_p999_ms": 2997000000, - "core1_query_requestTimes_p75_ms": 
225000000, - "core1_update_requestTimes_count": 3, - "core2_update_requestTimes_p95_ms": 285000000, - "core1_query_requestTimes_count": 3, - "core1_query_requestTimes_p99_ms": 297000000, - "core1_update_requestTimes_median_ms": 0, - "core1_update_requestTimes_max_ms": 0, - "core2_update_requestTimes_count": 3, - "core1_query_requestTimes_min_ms": 0, - "core1_update_timeouts_count": 3, - "core2_update_timeouts_count": 3, - "core2_update_errors_count": 3, - "core1_update_requests_count": 3, - "core2_query_errors_count": 3, - "core1_query_requestTimes_median_ms": 0, - "core1_query_requestTimes_max_ms": 0, - "core1_update_clientErrors_count": 3, - "core2_update_requestTimes_median_ms": 0, - "core2_query_requestTimes_mean_ms": 0, - "core2_update_totalTime_count": 3, - "core2_update_requestTimes_max_ms": 0, - "core2_update_requestTimes_p999_ms": 2997000000, - "core2_query_timeouts_count": 3, - "core2_query_requestTimes_max_ms": 0, - "core1_query_totalTime_count": 3, - "core2_query_totalTime_count": 3, - } - - assert.Equal(t, expected, job.Collect()) - assert.Equal(t, expected, job.Collect()) -} - -func TestSolr_CollectV7(t *testing.T) { - job := New() - - ts := httptest.NewServer( - http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/solr/admin/info/system" { - _, _ = w.Write([]byte(version(fmt.Sprintf("%.1f.0", minSupportedVersion+1)))) - return - } - if r.URL.Path == "/solr/admin/metrics" { - _, _ = w.Write(coreMetricsV7) - return - } - })) - - job.URL = ts.URL - - require.True(t, job.Init()) - require.True(t, job.Check()) - require.NotNil(t, job.Charts()) - - expected := map[string]int64{ - "core1_query_requestTimes_p95_ms": 285000000, - "core1_query_timeouts_count": 3, - "core1_update_requestTimes_p999_ms": 2997000000, - "core2_query_requestTimes_mean_ms": 0, - "core2_query_timeouts_count": 3, - "core1_update_timeouts_count": 3, - "core1_update_requestTimes_mean_ms": 0, - "core2_update_serverErrors_count": 3, - "core2_query_requestTimes_min_ms": 0, - "core2_query_requestTimes_p75_ms": 225000000, - "core2_update_clientErrors_count": 3, - "core2_update_requestTimes_count": 3, - "core2_query_requestTimes_max_ms": 0, - "core1_query_requestTimes_mean_ms": 0, - "core1_update_totalTime_count": 3, - "core1_query_serverErrors_count": 3, - "core1_update_requestTimes_p99_ms": 297000000, - "core2_query_totalTime_count": 3, - "core2_update_requestTimes_max_ms": 0, - "core2_query_requestTimes_p99_ms": 297000000, - "core1_query_requestTimes_count": 3, - "core1_query_requestTimes_median_ms": 0, - "core1_query_clientErrors_count": 3, - "core2_update_requestTimes_mean_ms": 0, - "core2_update_requestTimes_median_ms": 0, - "core2_update_requestTimes_p95_ms": 285000000, - "core2_update_requestTimes_p999_ms": 2997000000, - "core2_update_totalTime_count": 3, - "core1_update_clientErrors_count": 3, - "core2_query_serverErrors_count": 3, - "core2_query_requests_count": 3, - "core1_update_serverErrors_count": 3, - "core1_update_requestTimes_p75_ms": 225000000, - "core2_update_requestTimes_min_ms": 0, - "core2_query_errors_count": 3, - "core1_update_errors_count": 3, - "core1_query_totalTime_count": 3, - "core1_update_requestTimes_p95_ms": 285000000, - "core2_query_requestTimes_p95_ms": 285000000, - "core2_query_requestTimes_p999_ms": 2997000000, - "core1_query_requestTimes_min_ms": 0, - "core2_update_errors_count": 3, - "core2_query_clientErrors_count": 3, - "core1_update_requestTimes_min_ms": 0, - "core1_query_requestTimes_max_ms": 0, - "core1_query_requestTimes_p75_ms": 
225000000, - "core1_query_requestTimes_p999_ms": 2997000000, - "core2_update_requestTimes_p75_ms": 225000000, - "core2_update_timeouts_count": 3, - "core1_query_requestTimes_p99_ms": 297000000, - "core1_update_requests_count": 3, - "core1_update_requestTimes_median_ms": 0, - "core1_update_requestTimes_max_ms": 0, - "core2_update_requestTimes_p99_ms": 297000000, - "core2_query_requestTimes_count": 3, - "core1_query_errors_count": 3, - "core1_query_requests_count": 3, - "core1_update_requestTimes_count": 3, - "core2_update_requests_count": 3, - "core2_query_requestTimes_median_ms": 0, - } - - assert.Equal(t, expected, job.Collect()) - assert.Equal(t, expected, job.Collect()) -} - -func TestSolr_Collect_404(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - })) - defer ts.Close() - - job := New() - job.URL = ts.URL - - require.True(t, job.Init()) - assert.False(t, job.Check()) -} diff --git a/modules/solr/testdata/core-metrics-v6.txt b/modules/solr/testdata/core-metrics-v6.txt deleted file mode 100644 index 30d756b58..000000000 --- a/modules/solr/testdata/core-metrics-v6.txt +++ /dev/null @@ -1,794 +0,0 @@ -{ - "responseHeader":{ - "status":0, - "QTime":5 - }, - "metrics":{ - "solr.core.core1":{ - "QUERY./select.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./select.requests":{ - "count":1 - }, - "QUERY./select.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.totalTime":{ - "count":1 - }, - "QUERY./sql.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./sql.requests":{ - "count":1 - }, - "QUERY./sql.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.totalTime":{ - "count":1 - }, - "QUERY./stream.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./stream.requests":{ - "count":1 - }, - "QUERY./stream.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.timeouts":{ - "count":1, - 
"meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.totalTime":{ - "count":1 - }, - "UPDATE./update.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update.requests":{ - "count":1 - }, - "UPDATE./update.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.totalTime":{ - "count":1 - }, - "UPDATE./update/csv.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/csv.requests":{ - "count":1 - }, - "UPDATE./update/csv.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.totalTime":{ - "count":1 - }, - "UPDATE./update/json.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/json.requests":{ - "count":1 - }, - "UPDATE./update/json.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.totalTime":{ - "count":1 - }, - "UPDATE.updateHandler.adds":{ - "value":0 - }, - "UPDATE.updateHandler.autoCommits":{ - "value":0 - }, - "UPDATE.updateHandler.commits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeAdds":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesById":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesByQuery":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.deletesById":{ - "value":0 - }, - "UPDATE.updateHandler.deletesByQuery":{ - "value":0 - }, - "UPDATE.updateHandler.docsPending":{ - "value":0 - }, - "UPDATE.updateHandler.errors":{ - "value":0 - }, - 
"UPDATE.updateHandler.expungeDeletes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.merges":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.optimizes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.rollbacks":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.softAutoCommits":{ - "value":0 - }, - "UPDATE.updateHandler.splits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - } - }, - "solr.core.core2":{ - "QUERY./select.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./select.requests":{ - "count":1 - }, - "QUERY./select.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.totalTime":{ - "count":1 - }, - "QUERY./sql.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./sql.requests":{ - "count":1 - }, - "QUERY./sql.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.totalTime":{ - "count":1 - }, - "QUERY./stream.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./stream.requests":{ - "count":1 - }, - "QUERY./stream.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.totalTime":{ - "count":1 - }, - "UPDATE./update.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - 
"UPDATE./update.requests":{ - "count":1 - }, - "UPDATE./update.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.totalTime":{ - "count":1 - }, - "UPDATE./update/csv.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/csv.requests":{ - "count":1 - }, - "UPDATE./update/csv.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.totalTime":{ - "count":1 - }, - "UPDATE./update/json.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/json.requests":{ - "count":1 - }, - "UPDATE./update/json.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.totalTime":{ - "count":1 - }, - "UPDATE.updateHandler.adds":{ - "value":0 - }, - "UPDATE.updateHandler.autoCommits":{ - "value":0 - }, - "UPDATE.updateHandler.commits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeAdds":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesById":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesByQuery":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.deletesById":{ - "value":0 - }, - "UPDATE.updateHandler.deletesByQuery":{ - "value":0 - }, - "UPDATE.updateHandler.docsPending":{ - "value":0 - }, - "UPDATE.updateHandler.errors":{ - "value":0 - }, - "UPDATE.updateHandler.expungeDeletes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.merges":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.optimizes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.rollbacks":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.softAutoCommits":{ - "value":0 - }, - "UPDATE.updateHandler.splits":{ - "count":1, - "meanRate":0, - 
"1minRate":0, - "5minRate":0, - "15minRate":0 - } - } - } -} \ No newline at end of file diff --git a/modules/solr/testdata/core-metrics-v7.txt b/modules/solr/testdata/core-metrics-v7.txt deleted file mode 100644 index 0567f0d9b..000000000 --- a/modules/solr/testdata/core-metrics-v7.txt +++ /dev/null @@ -1,732 +0,0 @@ -{ - "responseHeader":{ - "status":0, - "QTime":5 - }, - "metrics":{ - "solr.core.core1":{ - "QUERY./select.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.handlerStart":1546020968904, - "QUERY./select.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./select.requests":1, - "QUERY./select.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.totalTime":1, - "QUERY./sql.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.handlerStart":1546020968901, - "QUERY./sql.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./sql.requests":1, - "QUERY./sql.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.totalTime":1, - "QUERY./stream.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.handlerStart":1546020968894, - "QUERY./stream.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./stream.requests":1, - "QUERY./stream.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.totalTime":1, - "UPDATE./update.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.handlerStart":1546020968419, - "UPDATE./update.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update.requests":1, - "UPDATE./update.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.timeouts":{ - "count":1, - "meanRate":0, 
- "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.totalTime":1, - "UPDATE./update/csv.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.handlerStart":1546020968462, - "UPDATE./update/csv.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/csv.requests":1, - "UPDATE./update/csv.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.totalTime":1, - "UPDATE./update/json.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.handlerStart":1546020968445, - "UPDATE./update/json.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/json.requests":1, - "UPDATE./update/json.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.totalTime":1, - "UPDATE.updateHandler.adds":0, - "UPDATE.updateHandler.autoCommitMaxTime":"15000ms", - "UPDATE.updateHandler.autoCommits":0, - "UPDATE.updateHandler.commits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeAdds":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesById":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesByQuery":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.deletesById":0, - "UPDATE.updateHandler.deletesByQuery":0, - "UPDATE.updateHandler.docsPending":0, - "UPDATE.updateHandler.errors":0, - "UPDATE.updateHandler.expungeDeletes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.merges":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.optimizes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.rollbacks":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.softAutoCommits":0, - "UPDATE.updateHandler.splits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - } - }, - "solr.core.core2":{ - "QUERY./select.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.errors":{ - "count":1, - "meanRate":0, - 
"1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.handlerStart":1546020968904, - "QUERY./select.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./select.requests":1, - "QUERY./select.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./select.totalTime":1, - "QUERY./sql.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.handlerStart":1546020968901, - "QUERY./sql.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./sql.requests":1, - "QUERY./sql.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./sql.totalTime":1, - "QUERY./stream.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.handlerStart":1546020968894, - "QUERY./stream.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "QUERY./stream.requests":1, - "QUERY./stream.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "QUERY./stream.totalTime":1, - "UPDATE./update.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.handlerStart":1546020968419, - "UPDATE./update.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update.requests":1, - "UPDATE./update.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update.totalTime":1, - "UPDATE./update/csv.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.handlerStart":1546020968462, - "UPDATE./update/csv.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - 
"p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/csv.requests":1, - "UPDATE./update/csv.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/csv.totalTime":1, - "UPDATE./update/json.clientErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.errors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.handlerStart":1546020968445, - "UPDATE./update/json.requestTimes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0, - "min_ms":0, - "max_ms":0, - "mean_ms":0, - "median_ms":0, - "stddev_ms":0, - "p75_ms":75, - "p95_ms":95, - "p99_ms":99, - "p999_ms":999 - }, - "UPDATE./update/json.requests":1, - "UPDATE./update/json.serverErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.timeouts":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE./update/json.totalTime":1, - "UPDATE.updateHandler.adds":0, - "UPDATE.updateHandler.autoCommitMaxTime":"15000ms", - "UPDATE.updateHandler.autoCommits":0, - "UPDATE.updateHandler.commits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeAdds":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesById":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeDeletesByQuery":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.cumulativeErrors":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.deletesById":0, - "UPDATE.updateHandler.deletesByQuery":0, - "UPDATE.updateHandler.docsPending":0, - "UPDATE.updateHandler.errors":0, - "UPDATE.updateHandler.expungeDeletes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.merges":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.optimizes":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.rollbacks":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - }, - "UPDATE.updateHandler.softAutoCommits":0, - "UPDATE.updateHandler.splits":{ - "count":1, - "meanRate":0, - "1minRate":0, - "5minRate":0, - "15minRate":0 - } - } - } -} \ No newline at end of file diff --git a/modules/springboot2/README.md b/modules/springboot2/README.md deleted file mode 120000 index 67b32e517..000000000 --- a/modules/springboot2/README.md +++ /dev/null @@ -1 +0,0 @@ -integrations/java_spring-boot_2_applications.md \ No newline at end of file diff --git a/modules/springboot2/charts.go b/modules/springboot2/charts.go deleted file mode 100644 index 9ca9c5806..000000000 --- a/modules/springboot2/charts.go +++ /dev/null @@ -1,77 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package springboot2 - -import ( - "github.com/netdata/go.d.plugin/agent/module" -) - -type ( - // Charts is an alias for module.Charts - Charts = module.Charts - // Dims is an alias for module.Dims - Dims = module.Dims -) - -var charts 
= Charts{ - { - ID: "response_codes", - Title: "Response Codes", Units: "requests/s", Fam: "response_code", Type: module.Stacked, Ctx: "springboot2.response_codes", - Dims: Dims{ - {ID: "resp_2xx", Name: "2xx", Algo: module.Incremental}, - {ID: "resp_5xx", Name: "5xx", Algo: module.Incremental}, - {ID: "resp_3xx", Name: "3xx", Algo: module.Incremental}, - {ID: "resp_4xx", Name: "4xx", Algo: module.Incremental}, - {ID: "resp_1xx", Name: "1xx", Algo: module.Incremental}, - }, - }, - { - ID: "thread", - Title: "Threads", Units: "threads", Fam: "threads", Type: module.Area, Ctx: "springboot2.thread", - Dims: Dims{ - {ID: "threads_daemon", Name: "daemon"}, - {ID: "threads", Name: "total"}, - }, - }, - { - ID: "heap", - Title: "Overview", Units: "B", Fam: "heap", Type: module.Stacked, Ctx: "springboot2.heap", - Dims: Dims{ - {ID: "mem_free", Name: "free"}, - {ID: "heap_used_eden", Name: "eden"}, - {ID: "heap_used_survivor", Name: "survivor"}, - {ID: "heap_used_old", Name: "old"}, - }, - }, - { - ID: "heap_eden", - Title: "Eden Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_eden", - Dims: Dims{ - {ID: "heap_used_eden", Name: "used"}, - {ID: "heap_committed_eden", Name: "committed"}, - }, - }, - { - ID: "heap_survivor", - Title: "Survivor Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_survivor", - Dims: Dims{ - {ID: "heap_used_survivor", Name: "used"}, - {ID: "heap_committed_survivor", Name: "committed"}, - }, - }, - { - ID: "heap_old", - Title: "Old Space", Units: "B", Fam: "heap", Type: module.Area, Ctx: "springboot2.heap_old", - Dims: Dims{ - {ID: "heap_used_old", Name: "used"}, - {ID: "heap_committed_old", Name: "committed"}, - }, - }, - { - ID: "uptime", - Title: "The uptime of the Java virtual machine", Units: "seconds", Fam: "uptime", Type: module.Line, Ctx: "springboot2.uptime", - Dims: Dims{ - {ID: "uptime", Name: "uptime", Div: 1000}, - }, - }, -} diff --git a/modules/springboot2/config_schema.json b/modules/springboot2/config_schema.json deleted file mode 100644 index 008a8bb2d..000000000 --- a/modules/springboot2/config_schema.json +++ /dev/null @@ -1,76 +0,0 @@ -{ - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/springboot2 job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "uri_filter": { - "type": "object", - "properties": { - "includes": { - "type": "array", - "items": { - "type": "string" - } - }, - "excludes": { - "type": "array", - "items": { - "type": "string" - } - } - } - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } - }, - "required": [ - "name", - "url" - ] -} diff --git a/modules/springboot2/integrations/java_spring-boot_2_applications.md b/modules/springboot2/integrations/java_spring-boot_2_applications.md deleted file mode 100644 index 534f75f92..000000000 --- a/modules/springboot2/integrations/java_spring-boot_2_applications.md +++ /dev/null @@ -1,233 +0,0 @@ - - -# Java 
Spring-boot 2 applications - - - - - -Plugin: go.d.plugin -Module: springboot2 - - - -## Overview - -This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library. - - - - -This collector is supported on all platforms. - -This collector supports collecting metrics from multiple instances of this integration, including remote instances. - - -### Default Behavior - -#### Auto-Detection - -By default, it detects applications running on localhost. - - -#### Limits - -The default configuration for this integration does not impose any limits on data collection. - -#### Performance Impact - -The default configuration for this integration is not expected to impose a significant performance impact on the system. - - -## Metrics - -Metrics grouped by *scope*. - -The scope defines the instance that the metric belongs to. An instance is uniquely identified by a set of labels. - - - -### Per Java Spring-boot 2 applications instance - -These metrics refer to the entire monitored application. - -This scope has no labels. - -Metrics: - -| Metric | Dimensions | Unit | -|:------|:----------|:----| -| springboot2.response_codes | 1xx, 2xx, 3xx, 4xx, 5xx | requests/s | -| springboot2.thread | daemon, total | threads | -| springboot2.heap | free, eden, survivor, old | B | -| springboot2.heap_eden | used, commited | B | -| springboot2.heap_survivor | used, commited | B | -| springboot2.heap_old | used, commited | B | -| springboot2.uptime | uptime | seconds | - - - -## Alerts - -There are no alerts configured by default for this integration. - - -## Setup - -### Prerequisites - -#### Configure Spring Boot Actuator - -The Spring Boot Actuator exposes metrics over HTTP, to use it: - -- add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies. -- set `management.endpoints.web.exposure.include=*` in your `application.properties`. - -Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information. - - - -### Configuration - -#### File - -The configuration file name for this integration is `go.d/springboot2.conf`. - - -You can edit the configuration file using the `edit-config` script from the -Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory). - -```bash -cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata -sudo ./edit-config go.d/springboot2.conf -``` -#### Options - -The following options can be defined globally: update_every, autodetection_retry. - - -
Config options
-
-| Name | Description | Default | Required |
-|:----|:-----------|:-------|:--------:|
-| update_every | Data collection frequency. | 1 | no |
-| autodetection_retry | Recheck interval in seconds. Zero means no recheck will be scheduled. | 0 | no |
-| url | Server URL. | | yes |
-| timeout | HTTP request timeout. | 1 | no |
-| username | Username for basic HTTP authentication. | | no |
-| password | Password for basic HTTP authentication. | | no |
-| proxy_url | Proxy URL. | | no |
-| proxy_username | Username for proxy basic HTTP authentication. | | no |
-| proxy_password | Password for proxy basic HTTP authentication. | | no |
-| method | HTTP request method. | GET | no |
-| body | HTTP request body. | | no |
-| headers | HTTP request headers. | | no |
-| not_follow_redirects | Redirect handling policy. Controls whether the client follows redirects. | no | no |
-| tls_skip_verify | Server certificate chain and hostname validation policy. Controls whether the client performs this check. | no | no |
-| tls_ca | Certification authority that the client uses when verifying the server's certificates. | | no |
-| tls_cert | Client TLS certificate. | | no |
-| tls_key | Client TLS key. | | no |
-
-
-
-#### Examples
-
-##### Basic
-
-A basic example configuration.
-
-```yaml
-jobs:
-  - name: local
-    url: http://127.0.0.1:8080/actuator/prometheus
-
-```
-##### HTTP authentication
-
-Basic HTTP authentication.
-
-Config
-
-```yaml
-jobs:
-  - name: local
-    url: http://127.0.0.1:8080/actuator/prometheus
-    username: username
-    password: password
-
-```
-
-##### HTTPS with self-signed certificate
-
-Do not validate server certificate chain and hostname.
-
-
-Config
-
-```yaml
-jobs:
-  - name: local
-    url: https://127.0.0.1:8080/actuator/prometheus
-    tls_skip_verify: yes
-
-```
-
-##### Multi-instance
-
-> **Note**: When you define multiple jobs, their names must be unique.
-
-Collecting metrics from local and remote instances.
-
-
-Config
-
-```yaml
-jobs:
-  - name: local
-    url: http://127.0.0.1:8080/actuator/prometheus
-
-  - name: remote
-    url: http://192.0.2.1:8080/actuator/prometheus
-
-```
-
- - - -## Troubleshooting - -### Debug Mode - -To troubleshoot issues with the `springboot2` collector, run the `go.d.plugin` with the debug option enabled. The output -should give you clues as to why the collector isn't working. - -- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on - your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`. - - ```bash - cd /usr/libexec/netdata/plugins.d/ - ``` - -- Switch to the `netdata` user. - - ```bash - sudo -u netdata -s - ``` - -- Run the `go.d.plugin` to debug the collector: - - ```bash - ./go.d.plugin -d -m springboot2 - ``` - - diff --git a/modules/springboot2/metadata.yaml b/modules/springboot2/metadata.yaml deleted file mode 100644 index 462d29dae..000000000 --- a/modules/springboot2/metadata.yaml +++ /dev/null @@ -1,239 +0,0 @@ -plugin_name: go.d.plugin -modules: - - meta: - id: collector-go.d.plugin-springboot2 - plugin_name: go.d.plugin - module_name: springboot2 - monitored_instance: - name: Java Spring-boot 2 applications - link: "" - icon_filename: springboot.png - categories: - - data-collection.apm - keywords: - - springboot - related_resources: - integrations: - list: - - plugin_name: apps.plugin - module_name: apps - info_provided_to_referring_integrations: - description: "" - most_popular: true - overview: - data_collection: - metrics_description: | - This collector monitors Java Spring-boot 2 applications that expose their metrics using the Spring Boot Actuator included in the Spring Boot library. - method_description: "" - supported_platforms: - include: [] - exclude: [] - multi_instance: true - additional_permissions: - description: "" - default_behavior: - auto_detection: - description: | - By default, it detects applications running on localhost. - limits: - description: "" - performance_impact: - description: "" - setup: - prerequisites: - list: - - title: Configure Spring Boot Actuator - description: | - The Spring Boot Actuator exposes metrics over HTTP, to use it: - - - add `org.springframework.boot:spring-boot-starter-actuator` and `io.micrometer:micrometer-registry-prometheus` to your application dependencies. - - set `management.endpoints.web.exposure.include=*` in your `application.properties`. - - Refer to the [Spring Boot Actuator: Production-ready features](https://docs.spring.io/spring-boot/docs/current/reference/html/production-ready.html) and [81. Actuator - Part IX. ‘How-to’ guides](https://docs.spring.io/spring-boot/docs/current/reference/html/howto-actuator.html) for more information. - configuration: - file: - name: go.d/springboot2.conf - options: - description: | - The following options can be defined globally: update_every, autodetection_retry. - folding: - title: Config options - enabled: true - list: - - name: update_every - description: Data collection frequency. - default_value: 1 - required: false - - name: autodetection_retry - description: Recheck interval in seconds. Zero means no recheck will be scheduled. - default_value: 0 - required: false - - name: url - description: Server URL. - default_value: "" - required: true - - name: timeout - description: HTTP request timeout. - default_value: 1 - required: false - - name: username - description: Username for basic HTTP authentication. - default_value: "" - required: false - - name: password - description: Password for basic HTTP authentication. - default_value: "" - required: false - - name: proxy_url - description: Proxy URL. 
- default_value: "" - required: false - - name: proxy_username - description: Username for proxy basic HTTP authentication. - default_value: "" - required: false - - name: proxy_password - description: Password for proxy basic HTTP authentication. - default_value: "" - required: false - - name: method - description: HTTP request method. - default_value: GET - required: false - - name: body - description: HTTP request body. - default_value: "" - required: false - - name: headers - description: HTTP request headers. - default_value: "" - required: false - - name: not_follow_redirects - description: Redirect handling policy. Controls whether the client follows redirects. - default_value: no - required: false - - name: tls_skip_verify - description: Server certificate chain and hostname validation policy. Controls whether the client performs this check. - default_value: no - required: false - - name: tls_ca - description: Certification authority that the client uses when verifying the server's certificates. - default_value: "" - required: false - - name: tls_cert - description: Client TLS certificate. - default_value: "" - required: false - - name: tls_key - description: Client TLS key. - default_value: "" - required: false - examples: - folding: - title: Config - enabled: true - list: - - name: Basic - folding: - enabled: false - description: A basic example configuration. - config: | - jobs: - - name: local - url: http://127.0.0.1:8080/actuator/prometheus - - name: HTTP authentication - description: Basic HTTP authentication. - config: | - jobs: - - name: local - url: http://127.0.0.1:8080/actuator/prometheus - username: username - password: password - - name: HTTPS with self-signed certificate - description: | - Do not validate server certificate chain and hostname. - config: | - jobs: - - name: local - url: https://127.0.0.1:8080/actuator/prometheus - tls_skip_verify: yes - - name: Multi-instance - description: | - > **Note**: When you define multiple jobs, their names must be unique. - - Collecting metrics from local and remote instances. - config: | - jobs: - - name: local - url: http://127.0.0.1:8080/actuator/prometheus - - - name: remote - url: http://192.0.2.1:8080/actuator/prometheus - troubleshooting: - problems: - list: [] - alerts: [] - metrics: - folding: - title: Metrics - enabled: false - description: "" - availability: [] - scopes: - - name: global - description: These metrics refer to the entire monitored application. 
- labels: [] - metrics: - - name: springboot2.response_codes - description: Response Codes - unit: requests/s - chart_type: stacked - dimensions: - - name: 1xx - - name: 2xx - - name: 3xx - - name: 4xx - - name: 5xx - - name: springboot2.thread - description: Threads - unit: threads - chart_type: area - dimensions: - - name: daemon - - name: total - - name: springboot2.heap - description: Overview - unit: B - chart_type: stacked - dimensions: - - name: free - - name: eden - - name: survivor - - name: old - - name: springboot2.heap_eden - description: Eden Space - unit: B - chart_type: area - dimensions: - - name: used - - name: commited - - name: springboot2.heap_survivor - description: Survivor Space - unit: B - chart_type: area - dimensions: - - name: used - - name: commited - - name: springboot2.heap_old - description: Old Space - unit: B - chart_type: area - dimensions: - - name: used - - name: commited - - name: springboot2.uptime - description: TThe uptime of the Java virtual machine - unit: seconds - chart_type: line - dimensions: - - name: uptime diff --git a/modules/springboot2/springboot2.go b/modules/springboot2/springboot2.go deleted file mode 100644 index cff9d9c07..000000000 --- a/modules/springboot2/springboot2.go +++ /dev/null @@ -1,190 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package springboot2 - -import ( - _ "embed" - "strings" - "time" - - "github.com/netdata/go.d.plugin/pkg/matcher" - - mtx "github.com/netdata/go.d.plugin/pkg/metrics" - "github.com/netdata/go.d.plugin/pkg/prometheus" - "github.com/netdata/go.d.plugin/pkg/stm" - "github.com/netdata/go.d.plugin/pkg/web" - - "github.com/netdata/go.d.plugin/agent/module" -) - -//go:embed "config_schema.json" -var configSchema string - -func init() { - module.Register("springboot2", module.Creator{ - JobConfigSchema: configSchema, - Create: func() module.Module { return New() }, - }) -} - -const ( - defaultHTTPTimeout = time.Second -) - -// New returns SpringBoot2 instance with default values -func New() *SpringBoot2 { - return &SpringBoot2{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, - }, - }, - } -} - -// SpringBoot2 Spring boot 2 module -type SpringBoot2 struct { - module.Base - - web.HTTP `yaml:",inline"` - URIFilter matcher.SimpleExpr `yaml:"uri_filter"` - - uriFilter matcher.Matcher - - prom prometheus.Prometheus -} - -type metrics struct { - Uptime mtx.Gauge `stm:"uptime,1000"` - - ThreadsDaemon mtx.Gauge `stm:"threads_daemon"` - Threads mtx.Gauge `stm:"threads"` - - Resp1xx mtx.Counter `stm:"resp_1xx"` - Resp2xx mtx.Counter `stm:"resp_2xx"` - Resp3xx mtx.Counter `stm:"resp_3xx"` - Resp4xx mtx.Counter `stm:"resp_4xx"` - Resp5xx mtx.Counter `stm:"resp_5xx"` - - HeapUsed heap `stm:"heap_used"` - HeapCommitted heap `stm:"heap_committed"` - - MemFree mtx.Gauge `stm:"mem_free"` -} - -type heap struct { - Eden mtx.Gauge `stm:"eden"` - Survivor mtx.Gauge `stm:"survivor"` - Old mtx.Gauge `stm:"old"` -} - -// Cleanup Cleanup -func (SpringBoot2) Cleanup() {} - -// Init makes initialization -func (s *SpringBoot2) Init() bool { - client, err := web.NewHTTPClient(s.Client) - if err != nil { - s.Error(err) - return false - } - s.uriFilter, err = s.URIFilter.Parse() - if err != nil && err != matcher.ErrEmptyExpr { - s.Error(err) - return false - } - s.prom = prometheus.New(client, s.Request) - return true -} - -// Check makes check -func (s *SpringBoot2) Check() bool { - rawMetrics, err := s.prom.ScrapeSeries() - if err != nil { - s.Warning(err) - return false - } 
- jvmMemory := rawMetrics.FindByName("jvm_memory_used_bytes") - - return len(jvmMemory) > 0 -} - -// Charts creates Charts -func (SpringBoot2) Charts() *Charts { - return charts.Copy() -} - -// Collect collects metrics -func (s *SpringBoot2) Collect() map[string]int64 { - rawMetrics, err := s.prom.ScrapeSeries() - if err != nil { - return nil - } - - var m metrics - - // uptime - m.Uptime.Set(rawMetrics.FindByName("process_uptime_seconds").Max()) - - // response - s.gatherResponse(rawMetrics, &m) - - // threads - m.ThreadsDaemon.Set(rawMetrics.FindByNames("jvm_threads_daemon", "jvm_threads_daemon_threads").Max()) - m.Threads.Set(rawMetrics.FindByNames("jvm_threads_live", "jvm_threads_live_threads").Max()) - - // heap memory - gatherHeap(rawMetrics.FindByName("jvm_memory_used_bytes"), &m.HeapUsed) - gatherHeap(rawMetrics.FindByName("jvm_memory_committed_bytes"), &m.HeapCommitted) - m.MemFree.Set(m.HeapCommitted.Sum() - m.HeapUsed.Sum()) - - return stm.ToMap(m) -} - -func gatherHeap(rawMetrics prometheus.Series, m *heap) { - for _, metric := range rawMetrics { - id := metric.Labels.Get("id") - value := metric.Value - switch { - case strings.Contains(id, "Eden"): - m.Eden.Set(value) - case strings.Contains(id, "Survivor"): - m.Survivor.Set(value) - case strings.Contains(id, "Old") || strings.Contains(id, "Tenured"): - m.Old.Set(value) - } - } -} - -func (s *SpringBoot2) gatherResponse(rawMetrics prometheus.Series, m *metrics) { - for _, metric := range rawMetrics.FindByName("http_server_requests_seconds_count") { - if s.uriFilter != nil { - uri := metric.Labels.Get("uri") - if !s.uriFilter.MatchString(uri) { - continue - } - } - - status := metric.Labels.Get("status") - if status == "" { - continue - } - value := metric.Value - switch status[0] { - case '1': - m.Resp1xx.Add(value) - case '2': - m.Resp2xx.Add(value) - case '3': - m.Resp3xx.Add(value) - case '4': - m.Resp4xx.Add(value) - case '5': - m.Resp5xx.Add(value) - } - } -} - -func (h heap) Sum() float64 { - return h.Eden.Value() + h.Survivor.Value() + h.Old.Value() -} diff --git a/modules/springboot2/springboot2_test.go b/modules/springboot2/springboot2_test.go deleted file mode 100644 index 7198498d5..000000000 --- a/modules/springboot2/springboot2_test.go +++ /dev/null @@ -1,103 +0,0 @@ -// SPDX-License-Identifier: GPL-3.0-or-later - -package springboot2 - -import ( - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/stretchr/testify/assert" -) - -var ( - testdata, _ = os.ReadFile("tests/testdata.txt") - testdata2, _ = os.ReadFile("tests/testdata2.txt") -) - -func TestSpringboot2_Collect(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.URL.Path { - case "/actuator/prometheus": - _, _ = w.Write(testdata) - case "/actuator/prometheus2": - _, _ = w.Write(testdata2) - } - })) - defer ts.Close() - job1 := New() - job1.HTTP.Request.URL = ts.URL + "/actuator/prometheus" - assert.True(t, job1.Init()) - assert.True(t, job1.Check()) - assert.EqualValues( - t, - map[string]int64{ - "threads": 23, - "threads_daemon": 21, - "resp_1xx": 1, - "resp_2xx": 19, - "resp_3xx": 1, - "resp_4xx": 4, - "resp_5xx": 1, - "heap_used_eden": 129649936, - "heap_used_survivor": 8900136, - "heap_used_old": 17827920, - "heap_committed_eden": 153616384, - "heap_committed_survivor": 8912896, - "heap_committed_old": 40894464, - "mem_free": 47045752, - "uptime": 191730, - }, - job1.Collect(), - ) - - job2 := New() - job2.HTTP.Request.URL = ts.URL + "/actuator/prometheus2" - 
assert.True(t, job2.Init()) - assert.True(t, job2.Check()) - assert.EqualValues( - t, - map[string]int64{ - "threads": 36, - "threads_daemon": 22, - "resp_1xx": 0, - "resp_2xx": 57740, - "resp_3xx": 0, - "resp_4xx": 4, - "resp_5xx": 0, - "heap_used_eden": 18052960, - "heap_used_survivor": 302704, - "heap_used_old": 40122672, - "heap_committed_eden": 21430272, - "heap_committed_survivor": 2621440, - "heap_committed_old": 53182464, - "mem_free": 18755840, - "uptime": 45501125, - }, - job2.Collect(), - ) -} - -func TestSpringboot2_404(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - })) - defer ts.Close() - job := New() - job.HTTP.Request.URL = ts.URL + "/actuator/prometheus" - - job.Init() - - assert.False(t, job.Check()) - - job.Cleanup() -} - -func TestSpringBoot2_Charts(t *testing.T) { - job := New() - charts := job.Charts() - - assert.True(t, charts.Has("response_codes")) - assert.True(t, charts.Has("uptime")) -} diff --git a/modules/springboot2/tests/testdata.txt b/modules/springboot2/tests/testdata.txt deleted file mode 100644 index 11c70e40d..000000000 --- a/modules/springboot2/tests/testdata.txt +++ /dev/null @@ -1,194 +0,0 @@ -# HELP tomcat_cache_access_total -# TYPE tomcat_cache_access_total counter -tomcat_cache_access_total 0.0 -# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC -# TYPE jvm_gc_memory_promoted_bytes_total counter -jvm_gc_memory_promoted_bytes_total 562080.0 -# HELP tomcat_cache_hit_total -# TYPE tomcat_cache_hit_total counter -tomcat_cache_hit_total 0.0 -# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC -# TYPE jvm_gc_live_data_size_bytes gauge -jvm_gc_live_data_size_bytes 0.0 -# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management -# TYPE jvm_memory_max_bytes gauge -jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8 -jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0 -jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 -jvm_memory_max_bytes{area="heap",id="PS Eden Space",} 1.55189248E8 -jvm_memory_max_bytes{area="heap",id="PS Survivor Space",} 8912896.0 -jvm_memory_max_bytes{area="heap",id="PS Old Gen",} 3.49700096E8 -# HELP system_cpu_count The number of processors available to the Java virtual machine -# TYPE system_cpu_count gauge -system_cpu_count 2.0 -# HELP tomcat_global_request_seconds -# TYPE tomcat_global_request_seconds summary -tomcat_global_request_seconds_count{name="http-nio-8080",} 23.0 -tomcat_global_request_seconds_sum{name="http-nio-8080",} 1.205 -# HELP jvm_threads_daemon The current number of live daemon threads -# TYPE jvm_threads_daemon gauge -jvm_threads_daemon 21.0 -# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool -# TYPE jvm_buffer_memory_used_bytes gauge -jvm_buffer_memory_used_bytes{id="direct",} 81920.0 -jvm_buffer_memory_used_bytes{id="mapped",} 0.0 -# HELP jvm_buffer_count An estimate of the number of buffers in the pool -# TYPE jvm_buffer_count gauge -jvm_buffer_count{id="direct",} 10.0 -jvm_buffer_count{id="mapped",} 0.0 -# HELP tomcat_threads_current -# TYPE tomcat_threads_current gauge -tomcat_threads_current{name="http-nio-8080",} 10.0 -# HELP tomcat_sessions_created_total -# TYPE tomcat_sessions_created_total counter -tomcat_sessions_created_total 0.0 -# 
HELP system_cpu_usage The "recent cpu usage" for the whole system -# TYPE system_cpu_usage gauge -system_cpu_usage 0.03682658419046249 -# HELP tomcat_sessions_alive_max_seconds -# TYPE tomcat_sessions_alive_max_seconds gauge -tomcat_sessions_alive_max_seconds 0.0 -# HELP tomcat_servlet_error_total -# TYPE tomcat_servlet_error_total counter -tomcat_servlet_error_total{name="default",} 0.0 -# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time -# TYPE system_load_average_1m gauge -system_load_average_1m 0.2001953125 -# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool -# TYPE jvm_gc_max_data_size_bytes gauge -jvm_gc_max_data_size_bytes 0.0 -# HELP tomcat_sessions_expired_total -# TYPE tomcat_sessions_expired_total counter -tomcat_sessions_expired_total 0.0 -# HELP tomcat_sessions_rejected_total -# TYPE tomcat_sessions_rejected_total counter -tomcat_sessions_rejected_total 0.0 -# HELP process_start_time_seconds The start time of the Java virtual machine -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.544161580708E9 -# HELP jvm_threads_live The current number of live threads including both daemon and non-daemon threads -# TYPE jvm_threads_live gauge -jvm_threads_live 23.0 -# HELP jvm_classes_loaded The number of classes that are currently loaded in the Java virtual machine -# TYPE jvm_classes_loaded gauge -jvm_classes_loaded 7846.0 -# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next -# TYPE jvm_gc_memory_allocated_bytes_total counter -jvm_gc_memory_allocated_bytes_total 3.13524224E8 -# HELP process_uptime_seconds The uptime of the Java virtual machine -# TYPE process_uptime_seconds gauge -process_uptime_seconds 191.73 -# HELP tomcat_global_error_total -# TYPE tomcat_global_error_total counter -tomcat_global_error_total{name="http-nio-8080",} 4.0 -# HELP tomcat_threads_config_max -# TYPE tomcat_threads_config_max gauge -tomcat_threads_config_max{name="http-nio-8080",} 200.0 -# HELP jvm_threads_peak The peak live thread count since the Java virtual machine started or peak was reset -# TYPE jvm_threads_peak gauge -jvm_threads_peak 25.0 -# HELP jvm_classes_unloaded_total The total number of classes unloaded since the Java virtual machine has started execution -# TYPE jvm_classes_unloaded_total counter -jvm_classes_unloaded_total 0.0 -# HELP process_files_max The maximum file descriptor count -# TYPE process_files_max gauge -process_files_max 1048576.0 -# HELP tomcat_servlet_request_max_seconds -# TYPE tomcat_servlet_request_max_seconds gauge -tomcat_servlet_request_max_seconds{name="default",} 0.0 -# HELP tomcat_sessions_active_max -# TYPE tomcat_sessions_active_max gauge -tomcat_sessions_active_max 0.0 -# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use -# TYPE jvm_memory_committed_bytes gauge -jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 1.3369344E7 -jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 4.390912E7 -jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 5636096.0 -jvm_memory_committed_bytes{area="heap",id="PS Eden Space",} 1.53616384E8 -jvm_memory_committed_bytes{area="heap",id="PS Survivor Space",} 8912896.0 -jvm_memory_committed_bytes{area="heap",id="PS Old Gen",} 4.0894464E7 -# HELP 
tomcat_servlet_request_seconds -# TYPE tomcat_servlet_request_seconds summary -tomcat_servlet_request_seconds_count{name="default",} 0.0 -tomcat_servlet_request_seconds_sum{name="default",} 0.0 -# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool -# TYPE jvm_buffer_total_capacity_bytes gauge -jvm_buffer_total_capacity_bytes{id="direct",} 81920.0 -jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 -# HELP tomcat_global_received_bytes_total -# TYPE tomcat_global_received_bytes_total counter -tomcat_global_received_bytes_total{name="http-nio-8080",} 0.0 -# HELP jvm_gc_pause_seconds Time spent in GC pause -# TYPE jvm_gc_pause_seconds summary -jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 2.0 -jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 0.06 -# HELP jvm_gc_pause_seconds_max Time spent in GC pause -# TYPE jvm_gc_pause_seconds_max gauge -jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.0 -# HELP process_files_open The open file descriptor count -# TYPE process_files_open gauge -process_files_open 29.0 -# HELP tomcat_global_sent_bytes_total -# TYPE tomcat_global_sent_bytes_total counter -tomcat_global_sent_bytes_total{name="http-nio-8080",} 63044.0 -# HELP tomcat_threads_busy -# TYPE tomcat_threads_busy gauge -tomcat_threads_busy{name="http-nio-8080",} 1.0 -# HELP tomcat_global_request_max_seconds -# TYPE tomcat_global_request_max_seconds gauge -tomcat_global_request_max_seconds{name="http-nio-8080",} 0.282 -# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process -# TYPE process_cpu_usage gauge -process_cpu_usage 0.019132561317701215 -# HELP jvm_memory_used_bytes The amount of used memory -# TYPE jvm_memory_used_bytes gauge -jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 1.3269376E7 -jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 4.1364704E7 -jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 5125872.0 -jvm_memory_used_bytes{area="heap",id="PS Eden Space",} 1.29649936E8 -jvm_memory_used_bytes{area="heap",id="PS Survivor Space",} 8900136.0 -jvm_memory_used_bytes{area="heap",id="PS Old Gen",} 1.782792E7 -# HELP logback_events_total Number of error level events that made it to the logs -# TYPE logback_events_total counter -logback_events_total{level="error",} 0.0 -logback_events_total{level="warn",} 0.0 -logback_events_total{level="info",} 41.0 -logback_events_total{level="debug",} 0.0 -logback_events_total{level="trace",} 0.0 -# HELP tomcat_sessions_active_current -# TYPE tomcat_sessions_active_current gauge -tomcat_sessions_active_current 0.0 -# HELP http_server_requests_seconds -# TYPE http_server_requests_seconds summary -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 6.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.2367162 -http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/**",} 3.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/**",} 0.0516521 -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 5.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0587843 -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/hello",} 4.0 
-http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/hello",} 0.0470746 -http_server_requests_seconds_count{exception="None",method="GET",status="102",uri="/hello",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="102",uri="/hello",} 0.0470746 -http_server_requests_seconds_count{exception="None",method="GET",status="302",uri="/hello",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="302",uri="/hello",} 0.0470746 -http_server_requests_seconds_count{exception="None",method="GET",status="503",uri="/hello",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="503",uri="/hello",} 0.0470746 -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/",} 2.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/",} 0.1888718 -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/health",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562 -http_server_requests_seconds_count{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837 -http_server_requests_seconds_count{exception="None",method="GET",status="200",uri="/actuator/metrics",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195 -# HELP http_server_requests_seconds_max -# TYPE http_server_requests_seconds_max gauge -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/prometheus",} 0.1311382 -http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/**",} 0.031655 -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/**/favicon.ico",} 0.0449076 -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/hello",} 0.0248288 -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/",} 0.1840505 -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/health",} 0.0602562 -http_server_requests_seconds_max{exception="None",method="GET",status="404",uri="/actuator/metrics/{requiredMetricName}",} 0.0349837 -http_server_requests_seconds_max{exception="None",method="GET",status="200",uri="/actuator/metrics",} 0.0170195 \ No newline at end of file diff --git a/modules/springboot2/tests/testdata2.txt b/modules/springboot2/tests/testdata2.txt deleted file mode 100644 index 78bbdf5cd..000000000 --- a/modules/springboot2/tests/testdata2.txt +++ /dev/null @@ -1,193 +0,0 @@ -# HELP jvm_classes_loaded_classes The number of classes that are currently loaded in the Java virtual machine -# TYPE jvm_classes_loaded_classes gauge -jvm_classes_loaded_classes 12360.0 -# HELP process_files_open_files The open file descriptor count -# TYPE process_files_open_files gauge -process_files_open_files 46.0 -# HELP jvm_memory_used_bytes The amount of used memory -# TYPE jvm_memory_used_bytes gauge -jvm_memory_used_bytes{area="heap",id="Tenured Gen",} 4.0122672E7 -jvm_memory_used_bytes{area="heap",id="Eden Space",} 1.805296E7 -jvm_memory_used_bytes{area="nonheap",id="Metaspace",} 6.6824752E7 -jvm_memory_used_bytes{area="nonheap",id="Code Cache",} 2.6224704E7 
-jvm_memory_used_bytes{area="heap",id="Survivor Space",} 302704.0 -jvm_memory_used_bytes{area="nonheap",id="Compressed Class Space",} 8236936.0 -# HELP system_cpu_count The number of processors available to the Java virtual machine -# TYPE system_cpu_count gauge -system_cpu_count 1.0 -# HELP process_cpu_usage The "recent cpu usage" for the Java Virtual Machine process -# TYPE process_cpu_usage gauge -process_cpu_usage 0.0 -# HELP tomcat_sessions_alive_max_seconds -# TYPE tomcat_sessions_alive_max_seconds gauge -tomcat_sessions_alive_max_seconds 0.0 -# HELP tomcat_global_sent_bytes_total -# TYPE tomcat_global_sent_bytes_total counter -tomcat_global_sent_bytes_total{name="http-nio-17001",} 7.06007212E8 -# HELP jvm_threads_states_threads The current number of threads having NEW state -# TYPE jvm_threads_states_threads gauge -jvm_threads_states_threads{state="runnable",} 10.0 -jvm_threads_states_threads{state="blocked",} 0.0 -jvm_threads_states_threads{state="waiting",} 22.0 -jvm_threads_states_threads{state="timed-waiting",} 4.0 -jvm_threads_states_threads{state="new",} 0.0 -jvm_threads_states_threads{state="terminated",} 0.0 -# HELP process_start_time_seconds Start time of the process since unix epoch. -# TYPE process_start_time_seconds gauge -process_start_time_seconds 1.552476492313E9 -# HELP tomcat_sessions_active_max_sessions -# TYPE tomcat_sessions_active_max_sessions gauge -tomcat_sessions_active_max_sessions 0.0 -# HELP jvm_gc_live_data_size_bytes Size of old generation memory pool after a full GC -# TYPE jvm_gc_live_data_size_bytes gauge -jvm_gc_live_data_size_bytes 3.1908592E7 -# HELP spring_integration_channels The number of message channels -# TYPE spring_integration_channels gauge -spring_integration_channels 6.0 -# HELP system_cpu_usage The "recent cpu usage" for the whole system -# TYPE system_cpu_usage gauge -system_cpu_usage 0.047619047619047616 -# HELP jvm_classes_unloaded_classes_total The total number of classes unloaded since the Java virtual machine has started execution -# TYPE jvm_classes_unloaded_classes_total counter -jvm_classes_unloaded_classes_total 0.0 -# HELP jvm_memory_max_bytes The maximum amount of memory in bytes that can be used for memory management -# TYPE jvm_memory_max_bytes gauge -jvm_memory_max_bytes{area="heap",id="Tenured Gen",} 6.61323776E8 -jvm_memory_max_bytes{area="heap",id="Eden Space",} 2.64568832E8 -jvm_memory_max_bytes{area="nonheap",id="Metaspace",} -1.0 -jvm_memory_max_bytes{area="nonheap",id="Code Cache",} 2.5165824E8 -jvm_memory_max_bytes{area="heap",id="Survivor Space",} 3.3030144E7 -jvm_memory_max_bytes{area="nonheap",id="Compressed Class Space",} 1.073741824E9 -# HELP logback_events_total Number of error level events that made it to the logs -# TYPE logback_events_total counter -logback_events_total{level="warn",} 1.0 -logback_events_total{level="debug",} 0.0 -logback_events_total{level="error",} 0.0 -logback_events_total{level="trace",} 0.0 -logback_events_total{level="info",} 30.0 -# HELP jvm_gc_max_data_size_bytes Max size of old generation memory pool -# TYPE jvm_gc_max_data_size_bytes gauge -jvm_gc_max_data_size_bytes 6.61323776E8 -# HELP tomcat_sessions_created_sessions_total -# TYPE tomcat_sessions_created_sessions_total counter -tomcat_sessions_created_sessions_total 0.0 -# HELP process_files_max_files The maximum file descriptor count -# TYPE process_files_max_files gauge -process_files_max_files 1006500.0 -# HELP spring_integration_sources The number of message sources -# TYPE spring_integration_sources gauge 
-spring_integration_sources 5.0 -# HELP tomcat_global_request_seconds -# TYPE tomcat_global_request_seconds summary -tomcat_global_request_seconds_count{name="http-nio-17001",} 57744.0 -tomcat_global_request_seconds_sum{name="http-nio-17001",} 113.513 -# HELP tomcat_sessions_active_current_sessions -# TYPE tomcat_sessions_active_current_sessions gauge -tomcat_sessions_active_current_sessions 0.0 -# HELP tomcat_global_error_total -# TYPE tomcat_global_error_total counter -tomcat_global_error_total{name="http-nio-17001",} 0.0 -# HELP jvm_threads_daemon_threads The current number of live daemon threads -# TYPE jvm_threads_daemon_threads gauge -jvm_threads_daemon_threads 22.0 -# HELP jvm_gc_memory_allocated_bytes_total Incremented for an increase in the size of the young generation memory pool after one GC to before the next -# TYPE jvm_gc_memory_allocated_bytes_total counter -jvm_gc_memory_allocated_bytes_total 2.7071024304E10 -# HELP http_server_requests_seconds -# TYPE http_server_requests_seconds summary -http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 57717.0 -http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 108.648599202 -http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 13.0 -http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 2.504856475 -http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 1.0 -http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 5.959808087 -http_server_requests_seconds_count{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 9.0 -http_server_requests_seconds_sum{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0506538 -http_server_requests_seconds_count{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 4.0 -http_server_requests_seconds_sum{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.00875155 -# HELP http_server_requests_seconds_max -# TYPE http_server_requests_seconds_max gauge -http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/actuator/prometheus",} 0.007270684 -http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/form",} 0.0 -http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/search/",} 0.0 -http_server_requests_seconds_max{exception="None",method="GET",outcome="SUCCESS",status="200",uri="/**/favicon.ico",} 0.0 -http_server_requests_seconds_max{exception="None",method="GET",outcome="CLIENT_ERROR",status="404",uri="/**",} 0.0 -# HELP jvm_buffer_total_capacity_bytes An estimate of the total capacity of the buffers in this pool -# TYPE jvm_buffer_total_capacity_bytes gauge -jvm_buffer_total_capacity_bytes{id="direct",} 278528.0 -jvm_buffer_total_capacity_bytes{id="mapped",} 0.0 -# HELP spring_integration_handlers The number of message handlers -# TYPE spring_integration_handlers gauge -spring_integration_handlers 5.0 -# HELP jvm_gc_memory_promoted_bytes_total Count of positive increases in the size of the old generation memory pool before GC to after GC -# TYPE 
jvm_gc_memory_promoted_bytes_total counter -jvm_gc_memory_promoted_bytes_total 2.4583704E7 -# HELP jvm_buffer_count_buffers An estimate of the number of buffers in the pool -# TYPE jvm_buffer_count_buffers gauge -jvm_buffer_count_buffers{id="direct",} 15.0 -jvm_buffer_count_buffers{id="mapped",} 0.0 -# HELP jvm_memory_committed_bytes The amount of memory in bytes that is committed for the Java virtual machine to use -# TYPE jvm_memory_committed_bytes gauge -jvm_memory_committed_bytes{area="heap",id="Tenured Gen",} 5.3182464E7 -jvm_memory_committed_bytes{area="heap",id="Eden Space",} 2.1430272E7 -jvm_memory_committed_bytes{area="nonheap",id="Metaspace",} 7.0803456E7 -jvm_memory_committed_bytes{area="nonheap",id="Code Cache",} 2.6804224E7 -jvm_memory_committed_bytes{area="heap",id="Survivor Space",} 2621440.0 -jvm_memory_committed_bytes{area="nonheap",id="Compressed Class Space",} 8953856.0 -# HELP tomcat_global_request_max_seconds -# TYPE tomcat_global_request_max_seconds gauge -tomcat_global_request_max_seconds{name="http-nio-17001",} 6.049 -# HELP process_uptime_seconds The uptime of the Java virtual machine -# TYPE process_uptime_seconds gauge -process_uptime_seconds 45501.125 -# HELP tomcat_threads_config_max_threads -# TYPE tomcat_threads_config_max_threads gauge -tomcat_threads_config_max_threads{name="http-nio-17001",} 200.0 -# HELP jvm_buffer_memory_used_bytes An estimate of the memory that the Java virtual machine is using for this buffer pool -# TYPE jvm_buffer_memory_used_bytes gauge -jvm_buffer_memory_used_bytes{id="direct",} 278529.0 -jvm_buffer_memory_used_bytes{id="mapped",} 0.0 -# HELP http_client_requests_seconds Timer of WebClient operation -# TYPE http_client_requests_seconds summary -http_client_requests_seconds_count{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 1.0 -http_client_requests_seconds_sum{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 2.258042154 -http_client_requests_seconds_count{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 2.0 -http_client_requests_seconds_sum{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.305785165 -# HELP http_client_requests_seconds_max Timer of WebClient operation -# TYPE http_client_requests_seconds_max gauge -http_client_requests_seconds_max{clientName="search.example.com",method="GET",status="IO_ERROR",uri="/dictionary",} 0.0 -http_client_requests_seconds_max{clientName="api.search.example.com",method="GET",status="200",uri="/v1/items",} 0.0 -# HELP tomcat_global_received_bytes_total -# TYPE tomcat_global_received_bytes_total counter -tomcat_global_received_bytes_total{name="http-nio-17001",} 0.0 -# HELP jvm_threads_peak_threads The peak live thread count since the Java virtual machine started or peak was reset -# TYPE jvm_threads_peak_threads gauge -jvm_threads_peak_threads 36.0 -# HELP jvm_threads_live_threads The current number of live threads including both daemon and non-daemon threads -# TYPE jvm_threads_live_threads gauge -jvm_threads_live_threads 36.0 -# HELP system_load_average_1m The sum of the number of runnable entities queued to available processors and the number of runnable entities running on the available processors averaged over a period of time -# TYPE system_load_average_1m gauge -system_load_average_1m 0.02 -# HELP tomcat_threads_current_threads -# TYPE tomcat_threads_current_threads gauge -tomcat_threads_current_threads{name="http-nio-17001",} 10.0 -# HELP 
tomcat_sessions_expired_sessions_total -# TYPE tomcat_sessions_expired_sessions_total counter -tomcat_sessions_expired_sessions_total 0.0 -# HELP tomcat_sessions_rejected_sessions_total -# TYPE tomcat_sessions_rejected_sessions_total counter -tomcat_sessions_rejected_sessions_total 0.0 -# HELP jvm_gc_pause_seconds Time spent in GC pause -# TYPE jvm_gc_pause_seconds summary -jvm_gc_pause_seconds_count{action="end of major GC",cause="Metadata GC Threshold",} 1.0 -jvm_gc_pause_seconds_sum{action="end of major GC",cause="Metadata GC Threshold",} 0.1 -jvm_gc_pause_seconds_count{action="end of minor GC",cause="Allocation Failure",} 1269.0 -jvm_gc_pause_seconds_sum{action="end of minor GC",cause="Allocation Failure",} 5.909 -# HELP jvm_gc_pause_seconds_max Time spent in GC pause -# TYPE jvm_gc_pause_seconds_max gauge -jvm_gc_pause_seconds_max{action="end of major GC",cause="Metadata GC Threshold",} 0.0 -jvm_gc_pause_seconds_max{action="end of minor GC",cause="Allocation Failure",} 0.004 -# HELP tomcat_threads_busy_threads -# TYPE tomcat_threads_busy_threads gauge -tomcat_threads_busy_threads{name="http-nio-17001",} 1.0 \ No newline at end of file diff --git a/modules/squidlog/collect.go b/modules/squidlog/collect.go index 20d3f86e8..bafa6d4cc 100644 --- a/modules/squidlog/collect.go +++ b/modules/squidlog/collect.go @@ -14,7 +14,7 @@ import ( "github.com/netdata/go.d.plugin/agent/module" ) -func (s SquidLog) logPanicStackIfAny() { +func (s *SquidLog) logPanicStackIfAny() { err := recover() if err == nil { return diff --git a/modules/squidlog/config_schema.json b/modules/squidlog/config_schema.json index dcf439c70..f33776343 100644 --- a/modules/squidlog/config_schema.json +++ b/modules/squidlog/config_schema.json @@ -1,101 +1,108 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/squid_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "parser": { - "type": "object", - "properties": { - "log_type": { - "type": "string" - }, - "csv_config": { - "type": "object", - "properties": { - "fields_per_record": { - "type": "integer" - }, - "delimiter": { - "type": "string" - }, - "trim_leading_space": { - "type": "boolean" - }, - "format": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/squid_log job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "parser": { + "type": "object", + "properties": { + "log_type": { + "type": "string" }, - "required": [ - "fields_per_record", - "delimiter", - "trim_leading_space", - "format" - ] - }, - "ltsv_config": { - "type": "object", - "properties": { - "field_delimiter": { - "type": "string" - }, - "value_delimiter": { - "type": "string" - }, - "mapping": { - "type": "object", - "additionalProperties": { + "csv_config": { + "type": "object", + "properties": { + "fields_per_record": { + "type": "integer" + }, + "delimiter": { + "type": "string" + }, + "trim_leading_space": { + "type": "boolean" + }, + "format": { "type": "string" } - } + }, + "required": [ + "fields_per_record", + "delimiter", + "trim_leading_space", + "format" + ] }, - "required": [ - "field_delimiter", - "value_delimiter", - "mapping" - ] - }, - "regexp_config": { - "type": "object", - "properties": { - "pattern": { - "type": "string" - } + "ltsv_config": { + "type": "object", + "properties": { + "field_delimiter": { + "type": "string" + }, + "value_delimiter": { + "type": "string" + }, + "mapping": { + 
"type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "field_delimiter", + "value_delimiter", + "mapping" + ] }, - "required": [ - "pattern" - ] - }, - "json_config": { - "type": "object", - "properties": { - "mapping": { - "type": "object", - "additionalProperties": { + "regexp_config": { + "type": "object", + "properties": { + "pattern": { "type": "string" } - } + }, + "required": [ + "pattern" + ] }, - "required": [ - "mapping" - ] - } + "json_config": { + "type": "object", + "properties": { + "mapping": { + "type": "object", + "additionalProperties": { + "type": "string" + } + } + }, + "required": [ + "mapping" + ] + } + }, + "required": [ + "log_type" + ] }, - "required": [ - "log_type" - ] - }, - "path": { - "type": "string" + "path": { + "type": "string" + }, + "exclude_path": { + "type": "string" + } }, - "exclude_path": { - "type": "string" - } + "required": [ + "name", + "path" + ] }, - "required": [ - "name", - "path" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/squidlog/squidlog.go b/modules/squidlog/squidlog.go index 704bc9627..3d07c3f0c 100644 --- a/modules/squidlog/squidlog.go +++ b/modules/squidlog/squidlog.go @@ -20,68 +20,70 @@ func init() { } func New() *SquidLog { - cfg := logs.ParserConfig{ - LogType: logs.TypeCSV, - CSV: logs.CSVConfig{ - FieldsPerRecord: -1, - Delimiter: " ", - TrimLeadingSpace: true, - Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type", - CheckField: checkCSVFormatField, - }, - } return &SquidLog{ Config: Config{ Path: "/var/log/squid/access.log", ExcludePath: "*.gz", - Parser: cfg, + Parser: logs.ParserConfig{ + LogType: logs.TypeCSV, + CSV: logs.CSVConfig{ + FieldsPerRecord: -1, + Delimiter: " ", + TrimLeadingSpace: true, + Format: "- $resp_time $client_address $result_code $resp_size $req_method - - $hierarchy $mime_type", + CheckField: checkCSVFormatField, + }, + }, }, } } -type ( - Config struct { - Parser logs.ParserConfig `yaml:",inline"` - Path string `yaml:"path"` - ExcludePath string `yaml:"exclude_path"` - } +type Config struct { + Parser logs.ParserConfig `yaml:",inline"` + Path string `yaml:"path"` + ExcludePath string `yaml:"exclude_path"` +} - SquidLog struct { - module.Base - Config `yaml:",inline"` +type SquidLog struct { + module.Base + Config `yaml:",inline"` - file *logs.Reader - parser logs.Parser - line *logLine + file *logs.Reader + parser logs.Parser + line *logLine - mx *metricsData - charts *module.Charts - } -) + mx *metricsData + charts *module.Charts +} -func (s *SquidLog) Init() bool { +func (s *SquidLog) Configuration() any { + return s.Config +} + +func (s *SquidLog) Init() error { s.line = newEmptyLogLine() s.mx = newMetricsData() - return true + return nil } -func (s *SquidLog) Check() bool { +func (s *SquidLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := s.createLogReader(); err != nil { s.Warning("check failed: ", err) - return false + return err } if err := s.createParser(); err != nil { s.Warning("check failed: ", err) - return false + return err } if err := s.createCharts(s.line); err != nil { s.Warning("check failed: ", err) - return false + return err } - return true + + return nil } func (s *SquidLog) Charts() *module.Charts { diff --git a/modules/squidlog/squidlog_test.go b/modules/squidlog/squidlog_test.go index c6d818bf9..cbf6114ee 100644 --- a/modules/squidlog/squidlog_test.go +++ b/modules/squidlog/squidlog_test.go @@ -30,7 
+30,7 @@ func TestNew(t *testing.T) { func TestSquidLog_Init(t *testing.T) { squidlog := New() - assert.True(t, squidlog.Init()) + assert.NoError(t, squidlog.Init()) } func TestSquidLog_Check(t *testing.T) { @@ -40,18 +40,18 @@ func TestSquidLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) { squid := New() defer squid.Cleanup() squid.Path = "testdata/not_exists.log" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquid_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { squid := New() defer squid.Cleanup() squid.Path = "testdata/unknown.log" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) { @@ -59,9 +59,9 @@ func TestSquid_Check_ErrorOnCreatingParserZeroKnownFields(t *testing.T) { defer squid.Cleanup() squid.Path = "testdata/access.log" squid.Parser.CSV.Format = "$one $two" - require.True(t, squid.Init()) + require.NoError(t, squid.Init()) - assert.False(t, squid.Check()) + assert.Error(t, squid.Check()) } func TestSquidLog_Charts(t *testing.T) { @@ -280,8 +280,8 @@ func prepareSquidCollect(t *testing.T) *SquidLog { t.Helper() squid := New() squid.Path = "testdata/access.log" - require.True(t, squid.Init()) - require.True(t, squid.Check()) + require.NoError(t, squid.Init()) + require.NoError(t, squid.Check()) defer squid.Cleanup() p, err := logs.NewCSVParser(squid.Parser.CSV, bytes.NewReader(nativeFormatAccessLog)) diff --git a/modules/supervisord/config_schema.json b/modules/supervisord/config_schema.json index d3617c94a..38440438e 100644 --- a/modules/supervisord/config_schema.json +++ b/modules/supervisord/config_schema.json @@ -1,21 +1,28 @@ { - "$id": "https://example.com/person.schema.json", - "$schema": "https://json-schema.org/draft/2020-12/schema", - "title": "Supervisord collector job configuration", - "type": "object", - "properties": { - "firstName": { - "type": "string", - "description": "The person's first name." - }, - "lastName": { - "type": "string", - "description": "The person's last name." - }, - "age": { - "description": "Age in years which must be equal to or greater than zero.", - "type": "integer", - "minimum": 0 + "jsonSchema": { + "$id": "https://example.com/person.schema.json", + "$schema": "https://json-schema.org/draft/2020-12/schema", + "title": "Supervisord collector job configuration", + "type": "object", + "properties": { + "firstName": { + "type": "string", + "description": "The person's first name." + }, + "lastName": { + "type": "string", + "description": "The person's last name." 
+ }, + "age": { + "description": "Age in years which must be equal to or greater than zero.", + "type": "integer", + "minimum": 0 + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true } } } diff --git a/modules/supervisord/init.go b/modules/supervisord/init.go index 0c5285c3b..1c401bcd6 100644 --- a/modules/supervisord/init.go +++ b/modules/supervisord/init.go @@ -10,14 +10,14 @@ import ( "github.com/netdata/go.d.plugin/pkg/web" ) -func (s Supervisord) verifyConfig() error { +func (s *Supervisord) verifyConfig() error { if s.URL == "" { return errors.New("'url' not set") } return nil } -func (s Supervisord) initSupervisorClient() (supervisorClient, error) { +func (s *Supervisord) initSupervisorClient() (supervisorClient, error) { u, err := url.Parse(s.URL) if err != nil { return nil, fmt.Errorf("parse 'url': %v (%s)", err, s.URL) diff --git a/modules/supervisord/supervisord.go b/modules/supervisord/supervisord.go index 1c9994710..31ab8d943 100644 --- a/modules/supervisord/supervisord.go +++ b/modules/supervisord/supervisord.go @@ -4,6 +4,7 @@ package supervisord import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -25,7 +26,7 @@ func New() *Supervisord { Config: Config{ URL: "http://127.0.0.1:9001/RPC2", Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, @@ -55,25 +56,37 @@ type ( } ) -func (s *Supervisord) Init() bool { +func (s *Supervisord) Configuration() any { + return s.Config +} + +func (s *Supervisord) Init() error { err := s.verifyConfig() if err != nil { s.Errorf("verify config: %v", err) - return false + return err } client, err := s.initSupervisorClient() if err != nil { s.Errorf("init supervisord client: %v", err) - return false + return err } s.client = client - return true + return nil } -func (s *Supervisord) Check() bool { - return len(s.Collect()) > 0 +func (s *Supervisord) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *Supervisord) Charts() *module.Charts { diff --git a/modules/supervisord/supervisord_test.go b/modules/supervisord/supervisord_test.go index 23ef1ff0c..725c0fe29 100644 --- a/modules/supervisord/supervisord_test.go +++ b/modules/supervisord/supervisord_test.go @@ -38,9 +38,9 @@ func TestSupervisord_Init(t *testing.T) { supvr.Config = test.config if test.wantFail { - assert.False(t, supvr.Init()) + assert.Error(t, supvr.Init()) } else { - assert.True(t, supvr.Init()) + assert.NoError(t, supvr.Init()) } }) } @@ -69,9 +69,9 @@ func TestSupervisord_Check(t *testing.T) { defer supvr.Cleanup() if test.wantFail { - assert.False(t, supvr.Check()) + assert.Error(t, supvr.Check()) } else { - assert.True(t, supvr.Check()) + assert.NoError(t, supvr.Check()) } }) } @@ -79,7 +79,7 @@ func TestSupervisord_Check(t *testing.T) { func TestSupervisord_Charts(t *testing.T) { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) assert.NotNil(t, supvr.Charts()) } @@ -88,7 +88,7 @@ func TestSupervisord_Cleanup(t *testing.T) { supvr := New() assert.NotPanics(t, supvr.Cleanup) - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) m := &mockSupervisorClient{} supvr.client = m @@ -188,21 +188,21 @@ func ensureCollectedProcessesAddedToCharts(t *testing.T, supvr *Supervisord) { func prepareSupervisordSuccessOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + 
require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{} return supvr } func prepareSupervisordZeroProcessesOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{returnZeroProcesses: true} return supvr } func prepareSupervisordErrorOnGetAllProcessInfo(t *testing.T) *Supervisord { supvr := New() - require.True(t, supvr.Init()) + require.NoError(t, supvr.Init()) supvr.client = &mockSupervisorClient{errOnGetAllProcessInfo: true} return supvr } diff --git a/modules/systemdunits/collect.go b/modules/systemdunits/collect.go index 2843a4230..eb596605f 100644 --- a/modules/systemdunits/collect.go +++ b/modules/systemdunits/collect.go @@ -148,7 +148,7 @@ func (s *SystemdUnits) getSystemdVersion(conn systemdConnection) (int, error) { } func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration()) defer cancel() s.Debugf("calling function 'ListUnits'") @@ -169,7 +169,7 @@ func (s *SystemdUnits) getLoadedUnits(conn systemdConnection) ([]dbus.UnitStatus } func (s *SystemdUnits) getLoadedUnitsByPatterns(conn systemdConnection) ([]dbus.UnitStatus, error) { - ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration) + ctx, cancel := context.WithTimeout(context.Background(), s.Timeout.Duration()) defer cancel() s.Debugf("calling function 'ListUnitsByPatterns'") diff --git a/modules/systemdunits/config_schema.json b/modules/systemdunits/config_schema.json index 5a9df2571..d78e55508 100644 --- a/modules/systemdunits/config_schema.json +++ b/modules/systemdunits/config_schema.json @@ -1,27 +1,34 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/systemdunits job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "include": { - "type": "array", - "items": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/systemdunits job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" }, - "minItems": 1 + "include": { + "type": "array", + "items": { + "type": "string" + }, + "minItems": 1 + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "include" + ] }, - "required": [ - "name", - "include" - ] -} \ No newline at end of file + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } +} diff --git a/modules/systemdunits/systemdunits.go b/modules/systemdunits/systemdunits.go index 3593b531e..503b06ff3 100644 --- a/modules/systemdunits/systemdunits.go +++ b/modules/systemdunits/systemdunits.go @@ -7,6 +7,7 @@ package systemdunits import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -33,7 +34,7 @@ func New() *SystemdUnits { Include: []string{ "*.service", }, - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, charts: &module.Charts{}, @@ -61,27 +62,40 @@ type SystemdUnits struct { charts *module.Charts } -func (s *SystemdUnits) Init() bool { +func (s *SystemdUnits) Configuration() any { + return s.Config +} + +func (s *SystemdUnits) Init() error { err := s.validateConfig() if err != nil { s.Errorf("config validation: %v", err) - return false + return err } sr, 
err := s.initSelector() if err != nil { s.Errorf("init selector: %v", err) - return false + return err } s.sr = sr s.Debugf("unit names patterns: %v", s.Include) s.Debugf("timeout: %s", s.Timeout) - return true + + return nil } -func (s *SystemdUnits) Check() bool { - return len(s.Collect()) > 0 +func (s *SystemdUnits) Check() error { + mx, err := s.collect() + if err != nil { + s.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (s *SystemdUnits) Charts() *module.Charts { @@ -89,15 +103,15 @@ func (s *SystemdUnits) Charts() *module.Charts { } func (s *SystemdUnits) Collect() map[string]int64 { - ms, err := s.collect() + mx, err := s.collect() if err != nil { s.Error(err) } - if len(ms) == 0 { + if len(mx) == 0 { return nil } - return ms + return mx } func (s *SystemdUnits) Cleanup() { diff --git a/modules/systemdunits/systemdunits_test.go b/modules/systemdunits/systemdunits_test.go index baa9ed46a..606708bd5 100644 --- a/modules/systemdunits/systemdunits_test.go +++ b/modules/systemdunits/systemdunits_test.go @@ -48,9 +48,9 @@ func TestSystemdUnits_Init(t *testing.T) { systemd.Config = test.config if test.wantFail { - assert.False(t, systemd.Init()) + assert.Error(t, systemd.Init()) } else { - assert.True(t, systemd.Init()) + assert.NoError(t, systemd.Init()) } }) } @@ -115,12 +115,12 @@ func TestSystemdUnits_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { systemd := test.prepare() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) if test.wantFail { - assert.False(t, systemd.Check()) + assert.Error(t, systemd.Check()) } else { - assert.True(t, systemd.Check()) + assert.NoError(t, systemd.Check()) } }) } @@ -128,7 +128,7 @@ func TestSystemdUnits_Check(t *testing.T) { func TestSystemdUnits_Charts(t *testing.T) { systemd := New() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) assert.NotNil(t, systemd.Charts()) } @@ -138,7 +138,7 @@ func TestSystemdUnits_Cleanup(t *testing.T) { client := prepareOKClient(230) systemd.client = client - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) require.NotNil(t, systemd.Collect()) conn := systemd.conn systemd.Cleanup() @@ -681,7 +681,7 @@ func TestSystemdUnits_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { systemd := test.prepare() - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) var collected map[string]int64 @@ -702,7 +702,7 @@ func TestSystemdUnits_connectionReuse(t *testing.T) { systemd.Include = []string{"*"} client := prepareOKClient(230) systemd.client = client - require.True(t, systemd.Init()) + require.NoError(t, systemd.Init()) var collected map[string]int64 for i := 0; i < 10; i++ { diff --git a/modules/tengine/config_schema.json b/modules/tengine/config_schema.json index 30958bb1b..fdf7f45bb 100644 --- a/modules/tengine/config_schema.json +++ b/modules/tengine/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/tengine job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": 
"http://json-schema.org/draft-07/schema#", + "title": "Tengine collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Tengine status page to monitor.", + "type": "string", + "default": "http://127.0.0.1/us" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + 
"additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/tengine/tengine.go b/modules/tengine/tengine.go index 169b390ab..8d341b773 100644 --- a/modules/tengine/tengine.go +++ b/modules/tengine/tengine.go @@ -4,6 +4,7 @@ package tengine import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -21,70 +22,81 @@ func init() { }) } -const ( - defaultURL = "http://127.0.0.1/us" - defaultHTTPTimeout = time.Second * 2 -) - // New creates Tengine with default values. func New() *Tengine { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: defaultURL, - }, - Client: web.Client{ - Timeout: web.Duration{Duration: defaultHTTPTimeout}, + return &Tengine{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1/us", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second * 2), + }, }, }, + charts: charts.Copy(), } - return &Tengine{Config: config} } // Config is the Tengine module configuration. type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } // Tengine Tengine module. type Tengine struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` + + charts *module.Charts apiClient *apiClient } -// Cleanup makes cleanup. -func (Tengine) Cleanup() {} +func (t *Tengine) Configuration() any { + return t.Config +} // Init makes initialization. -func (t *Tengine) Init() bool { +func (t *Tengine) Init() error { if t.URL == "" { - t.Error("URL not set") - return false + t.Error("url not set") + return errors.New("url not set") } client, err := web.NewHTTPClient(t.Client) if err != nil { t.Errorf("error on creating http client : %v", err) - return false + return err } t.apiClient = newAPIClient(client, t.Request) t.Debugf("using URL: %s", t.URL) - t.Debugf("using timeout: %s", t.Timeout.Duration) - return true + t.Debugf("using timeout: %s", t.Timeout) + + return nil } // Check makes check -func (t *Tengine) Check() bool { - return len(t.Collect()) > 0 +func (t *Tengine) Check() error { + mx, err := t.collect() + if err != nil { + t.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts returns Charts. 
-func (t Tengine) Charts() *module.Charts { - return charts.Copy() +func (t *Tengine) Charts() *module.Charts { + return t.charts } // Collect collects metrics. @@ -98,3 +110,10 @@ func (t *Tengine) Collect() map[string]int64 { return mx } + +// Cleanup makes cleanup. +func (t *Tengine) Cleanup() { + if t.apiClient != nil && t.apiClient.httpClient != nil { + t.apiClient.httpClient.CloseIdleConnections() + } +} diff --git a/modules/tengine/tengine_test.go b/modules/tengine/tengine_test.go index 04fe5f9e7..74a8801cb 100644 --- a/modules/tengine/tengine_test.go +++ b/modules/tengine/tengine_test.go @@ -8,7 +8,6 @@ import ( "os" "testing" - "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -17,20 +16,14 @@ var ( testStatusData, _ = os.ReadFile("testdata/status.txt") ) -func TestTengine_Cleanup(t *testing.T) { New().Cleanup() } - -func TestNew(t *testing.T) { - job := New() - - assert.Implements(t, (*module.Module)(nil), job) - assert.Equal(t, defaultURL, job.URL) - assert.Equal(t, defaultHTTPTimeout, job.Timeout.Duration) +func TestTengine_Cleanup(t *testing.T) { + New().Cleanup() } func TestTengine_Init(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) assert.NotNil(t, job.apiClient) } @@ -44,16 +37,16 @@ func TestTengine_Check(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.True(t, job.Check()) + require.NoError(t, job.Init()) + assert.NoError(t, job.Check()) } func TestTengine_CheckNG(t *testing.T) { job := New() job.URL = "http://127.0.0.1:38001/us" - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestTengine_Charts(t *testing.T) { assert.NotNil(t, New().Charts()) } @@ -68,8 +61,8 @@ func TestTengine_Collect(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - require.True(t, job.Check()) + require.NoError(t, job.Init()) + require.NoError(t, job.Check()) expected := map[string]int64{ "bytes_in": 5944, @@ -116,8 +109,8 @@ func TestTengine_InvalidData(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } func TestTengine_404(t *testing.T) { @@ -130,6 +123,6 @@ func TestTengine_404(t *testing.T) { job := New() job.URL = ts.URL - require.True(t, job.Init()) - assert.False(t, job.Check()) + require.NoError(t, job.Init()) + assert.Error(t, job.Check()) } diff --git a/modules/traefik/config_schema.json b/modules/traefik/config_schema.json index 0596ef83b..0dbe198cc 100644 --- a/modules/traefik/config_schema.json +++ b/modules/traefik/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/traefik job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Traefik collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", 
+ "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Traefik metrics endpoint.", + "type": "string", + "default": "http://127.0.0.1:8082/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be 
skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." }, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/traefik/traefik.go b/modules/traefik/traefik.go index a121b0236..77bf48494 100644 --- a/modules/traefik/traefik.go +++ b/modules/traefik/traefik.go @@ -4,6 +4,7 @@ package traefik import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -29,7 +30,7 @@ func New() *Traefik { URL: "http://127.0.0.1:8082/metrics", }, Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + Timeout: web.Duration(time.Second), }, }, }, @@ -43,13 +44,15 @@ func New() *Traefik { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type ( Traefik struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` prom prometheus.Prometheus charts *module.Charts @@ -73,24 +76,36 @@ type ( } ) -func (t *Traefik) Init() bool { +func (t *Traefik) Configuration() any { + return t.Config +} + +func (t *Traefik) Init() error { if err := t.validateConfig(); err != nil { t.Errorf("config validation: %v", err) - return false + return err } prom, err := t.initPrometheusClient() if err != nil { t.Errorf("prometheus client initialization: %v", err) - return false + return err } t.prom = prom - return true + return nil } -func (t *Traefik) Check() bool { - return len(t.Collect()) > 0 +func (t *Traefik) Check() error { + mx, err := t.collect() + if err != nil { + t.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (t *Traefik) Charts() *module.Charts { diff --git a/modules/traefik/traefik_test.go b/modules/traefik/traefik_test.go index c5804b672..b577bba7b 100644 --- a/modules/traefik/traefik_test.go +++ b/modules/traefik/traefik_test.go @@ -62,9 +62,9 @@ func TestTraefik_Init(t *testing.T) { rdb.Config = test.config if test.wantFail { - assert.False(t, rdb.Init()) + assert.Error(t, rdb.Init()) } else { - assert.True(t, rdb.Init()) + assert.NoError(t, rdb.Init()) } }) } @@ -107,9 +107,9 @@ func TestTraefik_Check(t *testing.T) { defer cleanup() if test.wantFail { - assert.False(t, tk.Check()) + assert.Error(t, tk.Check()) } else { - assert.True(t, tk.Check()) + assert.NoError(t, tk.Check()) } }) } @@ -255,7 +255,7 @@ func prepareCaseTraefikV221Metrics(t *testing.T) (*Traefik, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -292,7 +292,7 @@ 
traefik_entrypoint_request_duration_seconds_count{code="300",entrypoint="web",me })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -320,7 +320,7 @@ application_backend_http_responses_total{proxy="infra-vernemq-ws",code="other"} })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -333,7 +333,7 @@ func prepareCase404Response(t *testing.T) (*Traefik, func()) { })) h := New() h.URL = srv.URL - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, srv.Close } @@ -342,7 +342,7 @@ func prepareCaseConnectionRefused(t *testing.T) (*Traefik, func()) { t.Helper() h := New() h.URL = "http://127.0.0.1:38001" - require.True(t, h.Init()) + require.NoError(t, h.Init()) return h, func() {} } diff --git a/modules/unbound/config_schema.json b/modules/unbound/config_schema.json index 290905ac0..068dde457 100644 --- a/modules/unbound/config_schema.json +++ b/modules/unbound/config_schema.json @@ -1,44 +1,51 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/unbound job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "address": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "conf_path": { - "type": "string" - }, - "cumulative_stats": { - "type": "boolean" - }, - "use_tls": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "tls_skip_verify": { - "type": "boolean" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/unbound job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "conf_path": { + "type": "string" + }, + "cumulative_stats": { + "type": "boolean" + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } + }, + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/unbound/init.go b/modules/unbound/init.go index 6ae9543f3..bca49f027 100644 --- a/modules/unbound/init.go +++ b/modules/unbound/init.go @@ -87,9 +87,9 @@ func (u *Unbound) initClient() (err error) { u.client = socket.New(socket.Config{ Address: u.Address, - ConnectTimeout: u.Timeout.Duration, - ReadTimeout: u.Timeout.Duration, - WriteTimeout: u.Timeout.Duration, + ConnectTimeout: u.Timeout.Duration(), + ReadTimeout: u.Timeout.Duration(), + WriteTimeout: u.Timeout.Duration(), TLSConf: tlsCfg, }) return nil diff --git a/modules/unbound/unbound.go b/modules/unbound/unbound.go index 625ef75cd..a61bf41e9 100644 --- a/modules/unbound/unbound.go +++ b/modules/unbound/unbound.go @@ -4,6 +4,7 @@ package unbound import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/socket" @@ -24,60 +25,59 @@ func init() { } func New() *Unbound { - config := Config{ - Address: "127.0.0.1:8953", - ConfPath: "/etc/unbound/unbound.conf", - Timeout: web.Duration{Duration: time.Second}, - Cumulative: false, - UseTLS: true, - TLSConfig: tlscfg.TLSConfig{ - TLSCert: "/etc/unbound/unbound_control.pem", - TLSKey: "/etc/unbound/unbound_control.key", - 
InsecureSkipVerify: true, - }, - } - return &Unbound{ - Config: config, + Config: Config{ + Address: "127.0.0.1:8953", + ConfPath: "/etc/unbound/unbound.conf", + Timeout: web.Duration(time.Second), + Cumulative: false, + UseTLS: true, + TLSConfig: tlscfg.TLSConfig{ + TLSCert: "/etc/unbound/unbound_control.pem", + TLSKey: "/etc/unbound/unbound_control.key", + InsecureSkipVerify: true, + }, + }, curCache: newCollectCache(), cache: newCollectCache(), } } -type ( - Config struct { - Address string `yaml:"address"` - ConfPath string `yaml:"conf_path"` - Timeout web.Duration `yaml:"timeout"` - Cumulative bool `yaml:"cumulative_stats"` - UseTLS bool `yaml:"use_tls"` - tlscfg.TLSConfig `yaml:",inline"` - } - Unbound struct { - module.Base - Config `yaml:",inline"` +type Config struct { + Address string `yaml:"address"` + ConfPath string `yaml:"conf_path"` + Timeout web.Duration `yaml:"timeout"` + Cumulative bool `yaml:"cumulative_stats"` + UseTLS bool `yaml:"use_tls"` + tlscfg.TLSConfig `yaml:",inline"` +} - client socket.Client - cache collectCache - curCache collectCache +type Unbound struct { + module.Base + Config `yaml:",inline"` - prevCacheMiss float64 // needed for cumulative mode - extChartsCreated bool + client socket.Client + cache collectCache + curCache collectCache - charts *module.Charts - } -) + prevCacheMiss float64 // needed for cumulative mode + extChartsCreated bool -func (Unbound) Cleanup() {} + charts *module.Charts +} + +func (u *Unbound) Configuration() any { + return u.Config +} -func (u *Unbound) Init() bool { +func (u *Unbound) Init() error { if enabled := u.initConfig(); !enabled { - return false + return errors.New("remote control is disabled in the configuration file") } if err := u.initClient(); err != nil { u.Errorf("creating client: %v", err) - return false + return err } u.charts = charts(u.Cumulative) @@ -86,14 +86,23 @@ func (u *Unbound) Init() bool { if u.UseTLS { u.Debugf("using tls_skip_verify: %v, tls_key: %s, tls_cert: %s", u.InsecureSkipVerify, u.TLSKey, u.TLSCert) } - return true + + return nil } -func (u *Unbound) Check() bool { - return len(u.Collect()) > 0 +func (u *Unbound) Check() error { + mx, err := u.collect() + if err != nil { + u.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } -func (u Unbound) Charts() *module.Charts { +func (u *Unbound) Charts() *module.Charts { return u.charts } @@ -108,3 +117,9 @@ func (u *Unbound) Collect() map[string]int64 { } return mx } + +func (u *Unbound) Cleanup() { + if u.client != nil { + _ = u.client.Disconnect() + } +} diff --git a/modules/unbound/unbound_test.go b/modules/unbound/unbound_test.go index fabea299d..05a86a412 100644 --- a/modules/unbound/unbound_test.go +++ b/modules/unbound/unbound_test.go @@ -55,7 +55,7 @@ func TestNew(t *testing.T) { func TestUnbound_Init(t *testing.T) { unbound := nonTLSUnbound() - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) { @@ -74,7 +74,7 @@ func TestUnbound_Init_SetEverythingFromUnboundConf(t *testing.T) { }, } - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) assert.Equal(t, expectedConfig, unbound.Config) } @@ -82,37 +82,37 @@ func TestUnbound_Init_DisabledInUnboundConf(t *testing.T) { unbound := nonTLSUnbound() unbound.ConfPath = "testdata/unbound_disabled.conf" - assert.False(t, unbound.Init()) + assert.Error(t, unbound.Init()) } func TestUnbound_Init_HandleEmptyConfig(t *testing.T) { unbound := 
nonTLSUnbound() unbound.ConfPath = "testdata/unbound_empty.conf" - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Init_HandleNonExistentConfig(t *testing.T) { unbound := nonTLSUnbound() unbound.ConfPath = "testdata/unbound_non_existent.conf" - assert.True(t, unbound.Init()) + assert.NoError(t, unbound.Init()) } func TestUnbound_Check(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: commonStatsData, err: false} - assert.True(t, unbound.Check()) + assert.NoError(t, unbound.Check()) } func TestUnbound_Check_ErrorDuringScrapingUnbound(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{err: true} - assert.False(t, unbound.Check()) + assert.Error(t, unbound.Check()) } func TestUnbound_Cleanup(t *testing.T) { @@ -121,14 +121,14 @@ func TestUnbound_Cleanup(t *testing.T) { func TestUnbound_Charts(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) assert.NotNil(t, unbound.Charts()) } func TestUnbound_Collect(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: commonStatsData, err: false} collected := unbound.Collect() @@ -138,7 +138,7 @@ func TestUnbound_Collect(t *testing.T) { func TestUnbound_Collect_ExtendedStats(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: extStatsData, err: false} collected := unbound.Collect() @@ -158,7 +158,7 @@ func TestUnbound_Collect_LifeCycleCumulativeExtendedStats(t *testing.T) { unbound := nonTLSUnbound() unbound.Cumulative = true - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) ubClient := &mockUnboundClient{err: false} unbound.client = ubClient @@ -186,7 +186,7 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) { unbound := nonTLSUnbound() unbound.Cumulative = false - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) ubClient := &mockUnboundClient{err: false} unbound.client = ubClient @@ -204,7 +204,7 @@ func TestUnbound_Collect_LifeCycleResetExtendedStats(t *testing.T) { func TestUnbound_Collect_EmptyResponse(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: []byte{}, err: false} assert.Nil(t, unbound.Collect()) @@ -212,7 +212,7 @@ func TestUnbound_Collect_EmptyResponse(t *testing.T) { func TestUnbound_Collect_ErrorResponse(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{data: []byte("error unknown command 'unknown'"), err: false} assert.Nil(t, unbound.Collect()) @@ -220,7 +220,7 @@ func TestUnbound_Collect_ErrorResponse(t *testing.T) { func TestUnbound_Collect_ErrorOnSend(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) unbound.client = mockUnboundClient{err: true} assert.Nil(t, unbound.Collect()) @@ -228,7 +228,7 @@ func TestUnbound_Collect_ErrorOnSend(t *testing.T) { func TestUnbound_Collect_ErrorOnParseBadSyntax(t *testing.T) { unbound := nonTLSUnbound() - require.True(t, unbound.Init()) + require.NoError(t, unbound.Init()) data 
:= strings.Repeat("zk_avg_latency 0\nzk_min_latency 0\nzk_mix_latency 0\n", 10) unbound.client = mockUnboundClient{data: []byte(data), err: false} diff --git a/modules/upsd/client.go b/modules/upsd/client.go index be0148bc5..cf67acdf6 100644 --- a/modules/upsd/client.go +++ b/modules/upsd/client.go @@ -29,9 +29,9 @@ type upsUnit struct { func newUpsdConn(conf Config) upsdConn { return &upsdClient{conn: socket.New(socket.Config{ - ConnectTimeout: conf.Timeout.Duration, - ReadTimeout: conf.Timeout.Duration, - WriteTimeout: conf.Timeout.Duration, + ConnectTimeout: conf.Timeout.Duration(), + ReadTimeout: conf.Timeout.Duration(), + WriteTimeout: conf.Timeout.Duration(), Address: conf.Address, })} } diff --git a/modules/upsd/config_schema.json b/modules/upsd/config_schema.json index 49fc85354..214e0d676 100644 --- a/modules/upsd/config_schema.json +++ b/modules/upsd/config_schema.json @@ -1,29 +1,36 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/upsd job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/upsd job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "address": { + "type": "string" + }, + "username": { + "type": "string" + }, + "password": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + } }, - "address": { - "type": "string" - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - } + "required": [ + "name", + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/upsd/upsd.go b/modules/upsd/upsd.go index ebe0f36bc..131199ead 100644 --- a/modules/upsd/upsd.go +++ b/modules/upsd/upsd.go @@ -3,15 +3,21 @@ package upsd import ( + _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" "github.com/netdata/go.d.plugin/pkg/web" ) +//go:embed "config_schema.json" +var configSchema string + func init() { module.Register("upsd", module.Creator{ - Create: func() module.Module { return New() }, + JobConfigSchema: configSchema, + Create: func() module.Module { return New() }, }) } @@ -19,7 +25,7 @@ func New() *Upsd { return &Upsd{ Config: Config{ Address: "127.0.0.1:3493", - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), }, newUpsdConn: newUpsdConn, charts: &module.Charts{}, @@ -56,17 +62,29 @@ type ( } ) -func (u *Upsd) Init() bool { +func (u *Upsd) Configuration() any { + return u.Config +} + +func (u *Upsd) Init() error { if u.Address == "" { u.Error("config: 'address' not set") - return false + return errors.New("address not set") } - return true + return nil } -func (u *Upsd) Check() bool { - return len(u.Collect()) > 0 +func (u *Upsd) Check() error { + mx, err := u.collect() + if err != nil { + u.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (u *Upsd) Charts() *module.Charts { diff --git a/modules/upsd/upsd_test.go b/modules/upsd/upsd_test.go index 74c8626f1..dfe6ea563 100644 --- a/modules/upsd/upsd_test.go +++ b/modules/upsd/upsd_test.go @@ -19,7 +19,7 @@ func TestUpsd_Cleanup(t *testing.T) { mock := prepareMockConnOK() upsd.newUpsdConn = func(Config) upsdConn { return mock } - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) _ = 
upsd.Collect() require.NotPanics(t, upsd.Cleanup) assert.True(t, mock.calledDisconnect) @@ -46,9 +46,9 @@ func TestUpsd_Init(t *testing.T) { upsd.Config = test.config if test.wantFail { - assert.False(t, upsd.Init()) + assert.Error(t, upsd.Init()) } else { - assert.True(t, upsd.Init()) + assert.NoError(t, upsd.Init()) } }) } @@ -92,12 +92,12 @@ func TestUpsd_Check(t *testing.T) { upsd := test.prepareUpsd() upsd.newUpsdConn = func(Config) upsdConn { return test.prepareMock() } - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) if test.wantFail { - assert.False(t, upsd.Check()) + assert.Error(t, upsd.Check()) } else { - assert.True(t, upsd.Check()) + assert.NoError(t, upsd.Check()) } }) } @@ -105,7 +105,7 @@ func TestUpsd_Check(t *testing.T) { func TestUpsd_Charts(t *testing.T) { upsd := New() - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) assert.NotNil(t, upsd.Charts()) } @@ -225,7 +225,7 @@ func TestUpsd_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { upsd := test.prepareUpsd() - require.True(t, upsd.Init()) + require.NoError(t, upsd.Init()) mock := test.prepareMock() upsd.newUpsdConn = func(Config) upsdConn { return mock } diff --git a/modules/vcsa/config_schema.json b/modules/vcsa/config_schema.json index aab0647ab..003771a1b 100644 --- a/modules/vcsa/config_schema.json +++ b/modules/vcsa/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/vcsa job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "vCenter Server Appliance collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - 
"not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the VCSA server.", + "type": "string", + "default": "" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/vcsa/vcsa.go b/modules/vcsa/vcsa.go index ccac96f3a..1ba3b030e 100644 --- a/modules/vcsa/vcsa.go +++ b/modules/vcsa/vcsa.go @@ -4,6 +4,7 @@ package vcsa import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/web" @@ -29,7 +30,7 @@ func New() *VCSA { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -38,13 +39,15 @@ func New() *VCSA { } type Config struct { - web.HTTP `yaml:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + + web.HTTP `yaml:",inline" json:",inline"` } type ( VCSA struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` client healthClient @@ -66,33 +69,47 @@ type ( } ) -func (vc *VCSA) Init() bool { +func (vc *VCSA) Configuration() any { + return vc.Config +} + +func (vc *VCSA) Init() error { if err := vc.validateConfig(); err != nil { vc.Error(err) - return false + return err } c, err := vc.initHealthClient() if err != nil { vc.Errorf("error on creating health client : %vc", err) - return false + return err } vc.client = c vc.Debugf("using URL %s", vc.URL) - vc.Debugf("using timeout: %s", vc.Timeout.Duration) + vc.Debugf("using timeout: %s", vc.Timeout) - return true + return nil } -func (vc *VCSA) Check() bool { +func (vc *VCSA) Check() error { err := vc.client.Login() if err != nil { vc.Error(err) - return false + return err + } + + mx, err := vc.collect() + if err != nil { + vc.Error(err) + return err + } + + if len(mx) == 0 { + return errors.New("no metrics collected") } - return len(vc.Collect()) > 0 + return nil } func (vc *VCSA) Charts() *module.Charts { diff --git a/modules/vcsa/vcsa_test.go b/modules/vcsa/vcsa_test.go index 86185bfa2..7ecade8e3 100644 --- a/modules/vcsa/vcsa_test.go +++ b/modules/vcsa/vcsa_test.go @@ -27,54 +27,54 @@ func TestNew(t *testing.T) { func TestVCSA_Init(t *testing.T) { job := testNewVCSA() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.client) } func TestVCenter_InitErrorOnValidatingInitParameters(t *testing.T) { job := New() - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestVCenter_InitErrorOnCreatingClient(t *testing.T) { job := testNewVCSA() job.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestVCenter_Check(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = &mockVCenterHealthClient{} - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) } func TestVCenter_CheckErrorOnLogin(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.client = &mockVCenterHealthClient{ login: func() error { return errors.New("login mock error") }, } - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestVCenter_CheckEnsureLoggedIn(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) assert.True(t, mock.loginCalls == 1) } func TestVCenter_Cleanup(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + 
require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock job.Cleanup() @@ -94,7 +94,7 @@ func TestVCenter_Charts(t *testing.T) { func TestVCenter_Collect(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock @@ -153,7 +153,7 @@ func TestVCenter_Collect(t *testing.T) { func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{} job.client = mock job.Collect() @@ -163,7 +163,7 @@ func TestVCenter_CollectEnsurePingIsCalled(t *testing.T) { func TestVCenter_CollectErrorOnPing(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{ ping: func() error { return errors.New("ping mock error") }, } @@ -174,7 +174,7 @@ func TestVCenter_CollectErrorOnPing(t *testing.T) { func TestVCenter_CollectErrorOnHealthCalls(t *testing.T) { job := testNewVCSA() - require.True(t, job.Init()) + require.NoError(t, job.Init()) mock := &mockVCenterHealthClient{ applMgmt: func() (string, error) { return "", errors.New("applMgmt mock error") }, databaseStorage: func() (string, error) { return "", errors.New("databaseStorage mock error") }, diff --git a/modules/vernemq/config_schema.json b/modules/vernemq/config_schema.json index f21bab451..fb214da35 100644 --- a/modules/vernemq/config_schema.json +++ b/modules/vernemq/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/vernemq job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "VerneMQ collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, 
+ { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the VerneMQ metrics endpoint.", + "type": "string", + "default": "http://127.0.0.1:8888/metrics" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 1 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 1 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + "title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/vernemq/init.go b/modules/vernemq/init.go new file mode 100644 index 000000000..573b736ed --- /dev/null +++ b/modules/vernemq/init.go @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package vernemq + +import ( + "errors" + + "github.com/netdata/go.d.plugin/pkg/prometheus" + "github.com/netdata/go.d.plugin/pkg/web" +) + +func (v *VerneMQ) validateConfig() error { + if v.URL == "" { + return errors.New("url is not set") + } + return nil +} + +func (v *VerneMQ) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(v.Client) + if err != nil { + return nil, err + } + + return prometheus.New(client, v.Request), nil +} diff --git a/modules/vernemq/vernemq.go b/modules/vernemq/vernemq.go index d86f3b118..bb318eb9a 100644 --- a/modules/vernemq/vernemq.go +++ b/modules/vernemq/vernemq.go @@ -24,32 +24,32 @@ func init() { } func New() *VerneMQ { - config := Config{ - HTTP: web.HTTP{ - Request: web.Request{ - URL: "http://127.0.0.1:8888/metrics", - }, - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second}, + return &VerneMQ{ + Config: Config{ + HTTP: web.HTTP{ + Request: web.Request{ + URL: "http://127.0.0.1:8888/metrics", + }, + Client: web.Client{ + Timeout: web.Duration(time.Second), + }, }, }, - } - - return &VerneMQ{ - Config: config, charts: charts.Copy(), cache: make(cache), } } -type ( - Config struct { - web.HTTP `yaml:",inline"` - } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` + web.HTTP `yaml:",inline" json:",inline"` +} + +type ( VerneMQ struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` prom prometheus.Prometheus charts *Charts @@ -61,37 +61,36 @@ type ( func (c cache) hasP(v string) bool { ok := c[v]; c[v] = true; return ok } -func (v VerneMQ) validateConfig() error { - if v.URL == "" { - return errors.New("URL is not set") - } - return nil +func (v *VerneMQ) Configuration() any { + return v.Config } -func (v *VerneMQ) initClient() error { - client, err := web.NewHTTPClient(v.Client) +func (v *VerneMQ) Init() error { + if err := v.validateConfig(); err != nil { + v.Errorf("error on validating config: %v", err) + return err + } + + prom, err := v.initPrometheusClient() if err != nil { + v.Error(err) return err } + v.prom = prom - v.prom = prometheus.New(client, v.Request) return nil } -func (v *VerneMQ) Init() bool { - if err := v.validateConfig(); err != nil { - v.Errorf("error on validating config: %v", err) - return false +func (v *VerneMQ) Check() error { + mx, err := v.collect() + if err != nil { + v.Error(err) + return err } - if err := v.initClient(); err != nil { - v.Errorf("error on initializing client: %v", err) - return false + if len(mx) == 0 { + return errors.New("no metrics collected") } - return true -} - -func (v *VerneMQ) Check() bool { - return len(v.Collect()) > 0 + return nil } func (v *VerneMQ) Charts() *Charts { @@ -110,4 +109,8 @@ func (v *VerneMQ) Collect() map[string]int64 { return mx } -func (VerneMQ) Cleanup() {} +func (v *VerneMQ) Cleanup() { + if v.prom != nil && v.prom.HTTPClient() != nil { + v.prom.HTTPClient().CloseIdleConnections() + } +} diff --git a/modules/vernemq/vernemq_test.go b/modules/vernemq/vernemq_test.go index 5f07553cd..a86c20568 100644 --- 
a/modules/vernemq/vernemq_test.go +++ b/modules/vernemq/vernemq_test.go @@ -29,43 +29,43 @@ func TestNew(t *testing.T) { func TestVerneMQ_Init(t *testing.T) { verneMQ := prepareVerneMQ() - assert.True(t, verneMQ.Init()) + assert.NoError(t, verneMQ.Init()) } func TestVerneMQ_Init_ReturnsFalseIfURLIsNotSet(t *testing.T) { verneMQ := prepareVerneMQ() verneMQ.URL = "" - assert.False(t, verneMQ.Init()) + assert.Error(t, verneMQ.Init()) } func TestVerneMQ_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { verneMQ := prepareVerneMQ() verneMQ.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, verneMQ.Init()) + assert.Error(t, verneMQ.Init()) } func TestVerneMQ_Check(t *testing.T) { verneMQ, srv := prepareClientServerV1101(t) defer srv.Close() - assert.True(t, verneMQ.Check()) + assert.NoError(t, verneMQ.Check()) } func TestVerneMQ_Check_ReturnsFalseIfConnectionRefused(t *testing.T) { verneMQ := prepareVerneMQ() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) - assert.False(t, verneMQ.Check()) + assert.Error(t, verneMQ.Check()) } func TestVerneMQ_Check_ReturnsFalseIfMetricsAreNotVerneMQ(t *testing.T) { verneMQ, srv := prepareClientServerNotVerneMQ(t) defer srv.Close() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) - assert.False(t, verneMQ.Check()) + assert.Error(t, verneMQ.Check()) } func TestVerneMQ_Charts(t *testing.T) { @@ -87,7 +87,7 @@ func TestVerneMQ_Collect(t *testing.T) { func TestVerneMQ_Collect_ReturnsNilIfConnectionRefused(t *testing.T) { verneMQ := prepareVerneMQ() - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) assert.Nil(t, verneMQ.Collect()) } @@ -145,7 +145,7 @@ func prepareClientServerV1101(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -159,7 +159,7 @@ func prepareClientServerNotVerneMQ(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -173,7 +173,7 @@ func prepareClientServerInvalid(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } @@ -187,7 +187,7 @@ func prepareClientServerResponse404(t *testing.T) (*VerneMQ, *httptest.Server) { verneMQ := New() verneMQ.URL = ts.URL - require.True(t, verneMQ.Init()) + require.NoError(t, verneMQ.Init()) return verneMQ, ts } diff --git a/modules/vsphere/config_schema.json b/modules/vsphere/config_schema.json index 68bd55e1e..7026dc629 100644 --- a/modules/vsphere/config_schema.json +++ b/modules/vsphere/config_schema.json @@ -1,77 +1,84 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/vsphere job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "discovery_interval": { - "type": [ - "string", - "integer" - ] - }, - "host_include": { - "type": "array", - "items": { + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/vsphere job configuration schema.", + "type": "object", + "properties": { + "name": { "type": "string" - } - }, - "vm_include": { - "type": "array", - "items": { + }, + "url": { "type": "string" - } - }, - "username": { - "type": "string" - }, - "password": { - "type": 
"string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" - }, - "headers": { - "type": "object", - "additionalProperties": { + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "discovery_interval": { + "type": [ + "string", + "integer" + ] + }, + "host_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "vm_include": { + "type": "array", + "items": { + "type": "string" + } + }, + "username": { "type": "string" + }, + "password": { + "type": "string" + }, + "proxy_url": { + "type": "string" + }, + "proxy_username": { + "type": "string" + }, + "proxy_password": { + "type": "string" + }, + "headers": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "not_follow_redirects": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" } }, - "not_follow_redirects": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "name", + "url" + ] }, - "required": [ - "name", - "url" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/vsphere/discover.go b/modules/vsphere/discover.go index 65555a73b..1ea0a4d6e 100644 --- a/modules/vsphere/discover.go +++ b/modules/vsphere/discover.go @@ -14,7 +14,7 @@ func (vs *VSphere) goDiscovery() { vs.Errorf("error on discovering : %v", err) } } - vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration) + vs.discoveryTask = newTask(job, vs.DiscoveryInterval.Duration()) } func (vs *VSphere) discoverOnce() error { diff --git a/modules/vsphere/init.go b/modules/vsphere/init.go index a0f966220..c17029a6c 100644 --- a/modules/vsphere/init.go +++ b/modules/vsphere/init.go @@ -30,7 +30,7 @@ func (vs *VSphere) initClient() (*client.Client, error) { URL: vs.URL, User: vs.Username, Password: vs.Password, - Timeout: vs.Timeout.Duration, + Timeout: vs.Timeout.Duration(), TLSConfig: vs.Client.TLSConfig, } return client.New(config) diff --git a/modules/vsphere/vsphere.go b/modules/vsphere/vsphere.go index d7af8a495..b5a063a18 100644 --- a/modules/vsphere/vsphere.go +++ b/modules/vsphere/vsphere.go @@ -29,20 +29,18 @@ func init() { } func New() *VSphere { - config := Config{ - HTTP: web.HTTP{ - Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 20}, + return &VSphere{ + Config: Config{ + HTTP: web.HTTP{ + Client: web.Client{ + Timeout: web.Duration(time.Second * 20), + }, }, + DiscoveryInterval: web.Duration(time.Minute * 5), + HostsInclude: []string{"/*"}, + VMsInclude: []string{"/*"}, }, - DiscoveryInterval: web.Duration{Duration: time.Minute * 5}, - HostsInclude: []string{"/*"}, - VMsInclude: []string{"/*"}, - } - - return &VSphere{ - collectionLock: new(sync.RWMutex), - Config: config, + collectionLock: &sync.RWMutex{}, charts: &module.Charts{}, discoveredHosts: make(map[string]int), discoveredVMs: make(map[string]int), @@ -83,39 +81,41 @@ type ( } ) -func (vs *VSphere) Init() bool { +func (vs *VSphere) Configuration() any { + return vs.Config +} + +func (vs *VSphere) Init() error { if err := vs.validateConfig(); err != nil { vs.Errorf("error on validating config: %v", err) - return false + return err } vsClient, err := vs.initClient() if err != nil { vs.Errorf("error on creating 
vsphere client: %v", err) - return false + return err } - err = vs.initDiscoverer(vsClient) - if err != nil { + if err := vs.initDiscoverer(vsClient); err != nil { vs.Errorf("error on creating vsphere discoverer: %v", err) - return false + return err } vs.initScraper(vsClient) - err = vs.discoverOnce() - if err != nil { + if err := vs.discoverOnce(); err != nil { vs.Errorf("error on discovering: %v", err) - return false + return err } vs.goDiscovery() - return true + return nil } -func (vs *VSphere) Check() bool { - return true +func (vs *VSphere) Check() error { + return nil } func (vs *VSphere) Charts() *module.Charts { diff --git a/modules/vsphere/vsphere_test.go b/modules/vsphere/vsphere_test.go index 97c23d5ba..746082bb9 100644 --- a/modules/vsphere/vsphere_test.go +++ b/modules/vsphere/vsphere_test.go @@ -10,6 +10,7 @@ import ( "github.com/netdata/go.d.plugin/modules/vsphere/discover" "github.com/netdata/go.d.plugin/modules/vsphere/match" rs "github.com/netdata/go.d.plugin/modules/vsphere/resources" + "github.com/netdata/go.d.plugin/pkg/web" "github.com/netdata/go.d.plugin/agent/module" "github.com/stretchr/testify/assert" @@ -28,7 +29,7 @@ func TestVSphere_Init(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - assert.True(t, vSphere.Init()) + assert.NoError(t, vSphere.Init()) assert.NotNil(t, vSphere.discoverer) assert.NotNil(t, vSphere.scraper) assert.NotNil(t, vSphere.resources) @@ -41,7 +42,7 @@ func TestVSphere_Init_ReturnsFalseIfURLNotSet(t *testing.T) { defer teardown() vSphere.URL = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) { @@ -49,7 +50,7 @@ func TestVSphere_Init_ReturnsFalseIfUsernameNotSet(t *testing.T) { defer teardown() vSphere.Username = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { @@ -57,7 +58,7 @@ func TestVSphere_Init_ReturnsFalseIfPasswordNotSet(t *testing.T) { defer teardown() vSphere.Password = "" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { @@ -65,7 +66,7 @@ func TestVSphere_Init_ReturnsFalseIfClientWrongTLSCA(t *testing.T) { defer teardown() vSphere.Client.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) { @@ -73,7 +74,7 @@ func TestVSphere_Init_ReturnsFalseIfConnectionRefused(t *testing.T) { defer teardown() vSphere.URL = "http://127.0.0.1:32001" - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) { @@ -81,16 +82,16 @@ func TestVSphere_Init_ReturnsFalseIfInvalidHostVMIncludeFormat(t *testing.T) { defer teardown() vSphere.HostsInclude = match.HostIncludes{"invalid"} - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) vSphere.HostsInclude = vSphere.HostsInclude[:0] vSphere.VMsInclude = match.VMIncludes{"invalid"} - assert.False(t, vSphere.Init()) + assert.Error(t, vSphere.Init()) } func TestVSphere_Check(t *testing.T) { - assert.NotNil(t, New().Check()) + assert.NoError(t, New().Check()) } func TestVSphere_Charts(t *testing.T) { @@ -101,7 +102,7 @@ func TestVSphere_Cleanup(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) + require.NoError(t, vSphere.Init()) 
vSphere.Cleanup() time.Sleep(time.Second) @@ -117,7 +118,7 @@ func TestVSphere_Collect(t *testing.T) { vSphere, model, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) + require.NoError(t, vSphere.Init()) vSphere.scraper = mockScraper{vSphere.scraper} @@ -332,8 +333,8 @@ func TestVSphere_Collect_RemoveHostsVMsInRuntime(t *testing.T) { vSphere, _, teardown := prepareVSphereSim(t) defer teardown() - require.True(t, vSphere.Init()) - require.True(t, vSphere.Check()) + require.NoError(t, vSphere.Init()) + require.NoError(t, vSphere.Check()) okHostID := "host-50" okVMID := "vm-64" @@ -387,9 +388,9 @@ func TestVSphere_Collect_Run(t *testing.T) { vSphere, model, teardown := prepareVSphereSim(t) defer teardown() - vSphere.DiscoveryInterval.Duration = time.Second * 2 - require.True(t, vSphere.Init()) - require.True(t, vSphere.Check()) + vSphere.DiscoveryInterval = web.Duration(time.Second * 2) + require.NoError(t, vSphere.Init()) + require.NoError(t, vSphere.Check()) runs := 20 for i := 0; i < runs; i++ { diff --git a/modules/weblog/config_schema.json b/modules/weblog/config_schema.json index 82b6c358c..04cd5df34 100644 --- a/modules/weblog/config_schema.json +++ b/modules/weblog/config_schema.json @@ -1,208 +1,177 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/web_log job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "properties": { + "path": { + "title": "Log file path", + "description": "Path to the web server log file.", + "type": "string" + }, + "exclude_path": { + "title": "Exclude path", + "description": "Pattern to exclude log files.", + "type": "string", + "default": "*.gz" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "minimum": 1, + "default": 1, + "type": "integer" + }, + "log_type": { + "title": "Log parser", + "type": "string", + "enum": [ + "csv", + "regular expression", + "json", + "ltsv", + "auto" + ], + "default": "csv" + } }, - "parser": { - "type": "object", - "properties": { - "log_type": { - "type": "string" - }, - "csv_config": { - "type": "object", - "properties": { - "fields_per_record": { - "type": "integer" - }, - "delimiter": { - "type": "string" - }, - "trim_leading_space": { - "type": "boolean" + "required": [ + "path", + "log_type" + ], + "dependencies": { + "log_type": { + "oneOf": [ + { + "properties": { + "log_type": { + "const": "csv" + }, + "csv_config": { + "title": "CSV parser configuration", + "type": "object", + "properties": { + "format": { + "title": "Format", + "description": "Log format.", + "type": "string", + "default": "$remote_addr - - [$time_local] \"$request\" $status $body_bytes_sent" + }, + "delimiter": { + "title": "Delimiter", + "description": "Fields delimiter. 
The default is a space.", + "type": "string", + "default": " " + } + }, + "required": [ + "format", + "delimiter" + ] + } }, - "format": { - "type": "string" - } + "required": [ + "csv_config" + ] }, - "required": [ - "fields_per_record", - "delimiter", - "trim_leading_space", - "format" - ] - }, - "ltsv_config": { - "type": "object", - "properties": { - "field_delimiter": { - "type": "string" - }, - "value_delimiter": { - "type": "string" - }, - "mapping": { - "type": "object", - "additionalProperties": { - "type": "string" + { + "properties": { + "log_type": { + "const": "regular expression" + }, + "regexp_config": { + "title": "Regexp parser configuration", + "type": "object", + "properties": { + "pattern": { + "title": "Pattern with named groups", + "description": "Regular expression pattern with named groups. Use pattern with subexpressions names. These names should be **known fields**.", + "type": "string", + "default": "" + } + }, + "required": [ + "pattern" + ] } - } - }, - "required": [ - "field_delimiter", - "value_delimiter", - "mapping" - ] - }, - "regexp_config": { - "type": "object", - "properties": { - "pattern": { - "type": "string" - } + }, + "required": [ + "regexp_config" + ] }, - "required": [ - "pattern" - ] - }, - "json_config": { - "type": "object", - "properties": { - "mapping": { - "type": "object", - "additionalProperties": { - "type": "string" + { + "properties": { + "log_type": { + "const": "json" + }, + "json_config": { + "title": "JSON parser configuration", + "type": "object", + "properties": { + "mapping": { + "title": "Field to value mapping", + "description": "The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding *known field*.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } } } }, - "required": [ - "mapping" - ] - } - }, - "required": [ - "log_type" - ] - }, - "path": { - "type": "string" - }, - "exclude_path": { - "type": "string" - }, - "url_patterns": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "match": { - "type": "string" - } - }, - "required": [ - "name", - "match" - ] - } - }, - "custom_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "patterns": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "match": { - "type": "string" - } + { + "properties": { + "log_type": { + "const": "ltsv" }, - "required": [ - "name", - "match" - ] + "ltsv_config": { + "title": "LTSV parser configuration", + "type": "object", + "properties": { + "field_delimiter": { + "title": "Field delimiter", + "description": "The delimiter of fields. 
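The weblog schema's dependencies/oneOf blocks above encode at the UI level what the collector already does at runtime: log_type decides which sub-config (csv_config, regexp_config, json_config, ltsv_config) is honoured. A rough usage sketch of that selection through pkg/logs, assuming I am reading NewParser's behaviour correctly; the CSV format string is simply the schema's default, not something this patch adds:

package example

import (
	"fmt"
	"strings"

	"github.com/netdata/go.d.plugin/pkg/logs"
)

func main() {
	// log_type picks the sub-config, exactly as the schema's oneOf branches describe.
	cfg := logs.ParserConfig{
		LogType: logs.TypeCSV,
		CSV: logs.CSVConfig{
			Delimiter: " ",
			Format:    `$remote_addr - - [$time_local] "$request" $status $body_bytes_sent`,
		},
	}

	p, err := logs.NewParser(cfg, strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	fmt.Printf("selected parser: %T\n", p) // expected to be *logs.CSVParser
}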
It defaults to '\\t'.", + "type": "string", + "default": "\t" + }, + "value_delimiter": { + "title": "Value delimiter", + "description": "The delimiter of label-value pairs.", + "type": "string", + "default": ":" + }, + "mapping": { + "title": "Field to value mapping", + "description": "The mapping is a dictionary where the key is a field, as in logs, and the value is the corresponding *known field*.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + } } - } - }, - "required": [ - "name", - "patterns" - ] - } - }, - "custom_time_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" }, - "histogram": { - "type": "array", - "items": { - "type": "number" + { + "properties": { + "log_type": { + "const": "auto" + } } } - }, - "required": [ - "name", - "histogram" - ] - } - }, - "custom_numeric_fields": { - "type": "array", - "items": { - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "units": { - "type": "string" - }, - "multiplier": { - "type": "integer" - }, - "divisor": { - "type": "integer" - } - }, - "required": [ - "name", - "units", - "multiplier", - "divisor" ] } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "histogram": { - "type": "array", - "items": { - "type": "number" + "log_type": { + "ui:widget": "radio", + "ui:options": { + "inline": true } - }, - "group_response_codes": { - "type": "boolean" } - }, - "required": [ - "name", - "path" - ] + } } diff --git a/modules/weblog/weblog.go b/modules/weblog/weblog.go index 27bf43f9a..9926567c7 100644 --- a/modules/weblog/weblog.go +++ b/modules/weblog/weblog.go @@ -45,33 +45,34 @@ func New() *WebLog { type ( Config struct { - Parser logs.ParserConfig `yaml:",inline"` - Path string `yaml:"path"` - ExcludePath string `yaml:"exclude_path"` - URLPatterns []userPattern `yaml:"url_patterns"` - CustomFields []customField `yaml:"custom_fields"` - CustomTimeFields []customTimeField `yaml:"custom_time_fields"` - CustomNumericFields []customNumericField `yaml:"custom_numeric_fields"` - Histogram []float64 `yaml:"histogram"` - GroupRespCodes bool `yaml:"group_response_codes"` + UpdateEvery int `yaml:"update_every" json:"update_every"` + Parser logs.ParserConfig `yaml:",inline" json:",inline"` + Path string `yaml:"path" json:"path"` + ExcludePath string `yaml:"exclude_path" json:"exclude_path"` + URLPatterns []userPattern `yaml:"url_patterns" json:"url_patterns"` + CustomFields []customField `yaml:"custom_fields" json:"custom_fields"` + CustomTimeFields []customTimeField `yaml:"custom_time_fields" json:"custom_time_fields"` + CustomNumericFields []customNumericField `yaml:"custom_numeric_fields" json:"custom_numeric_fields"` + Histogram []float64 `yaml:"histogram" json:"histogram"` + GroupRespCodes bool `yaml:"group_response_codes" json:"group_resp_codes"` } userPattern struct { - Name string `yaml:"name"` - Match string `yaml:"match"` + Name string `yaml:"name" json:"name"` + Match string `yaml:"match" json:"match"` } customField struct { - Name string `yaml:"name"` - Patterns []userPattern `yaml:"patterns"` + Name string `yaml:"name" json:"name"` + Patterns []userPattern `yaml:"patterns" json:"patterns"` } customTimeField struct { - Name string `yaml:"name"` - Histogram []float64 `yaml:"histogram"` + Name string `yaml:"name" json:"name"` + Histogram []float64 `yaml:"histogram" json:"histogram"` } customNumericField struct { - Name string `yaml:"name"` - Units string `yaml:"units"` - Multiplier int 
`yaml:"multiplier"` - Divisor int `yaml:"divisor"` + Name string `yaml:"name" json:"name"` + Units string `yaml:"units" json:"units"` + Multiplier int `yaml:"multiplier" json:"multiplier"` + Divisor int `yaml:"divisor" json:"divisor"` } ) @@ -92,20 +93,24 @@ type WebLog struct { mx *metricsData } -func (w *WebLog) Init() bool { +func (w *WebLog) Configuration() any { + return w.Config +} + +func (w *WebLog) Init() error { if err := w.createURLPatterns(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomFields(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomTimeFields(); err != nil { w.Errorf("init failed: %v", err) - return false + return err } if err := w.createCustomNumericFields(); err != nil { @@ -115,26 +120,27 @@ func (w *WebLog) Init() bool { w.createLogLine() w.mx = newMetricsData(w.Config) - return true + return nil } -func (w *WebLog) Check() bool { +func (w *WebLog) Check() error { // Note: these inits are here to make auto-detection retry working if err := w.createLogReader(); err != nil { w.Warning("check failed: ", err) - return false + return err } if err := w.createParser(); err != nil { w.Warning("check failed: ", err) - return false + return err } if err := w.createCharts(w.line); err != nil { w.Warning("check failed: ", err) - return false + return err } - return true + + return nil } func (w *WebLog) Charts() *module.Charts { diff --git a/modules/weblog/weblog_test.go b/modules/weblog/weblog_test.go index 6195d2e49..e65a46e21 100644 --- a/modules/weblog/weblog_test.go +++ b/modules/weblog/weblog_test.go @@ -42,48 +42,48 @@ func TestNew(t *testing.T) { func TestWebLog_Init(t *testing.T) { weblog := New() - assert.True(t, weblog.Init()) + assert.NoError(t, weblog.Init()) } func TestWebLog_Init_ErrorOnCreatingURLPatterns(t *testing.T) { weblog := New() weblog.URLPatterns = []userPattern{{Match: "* !*"}} - assert.False(t, weblog.Init()) + assert.Error(t, weblog.Init()) } func TestWebLog_Init_ErrorOnCreatingCustomFields(t *testing.T) { weblog := New() weblog.CustomFields = []customField{{Patterns: []userPattern{{Name: "p1", Match: "* !*"}}}} - assert.False(t, weblog.Init()) + assert.Error(t, weblog.Init()) } func TestWebLog_Check(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/common.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.True(t, weblog.Check()) + assert.NoError(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingLogReaderNoLogFile(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/not_exists.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingParserUnknownFormat(t *testing.T) { weblog := New() defer weblog.Cleanup() weblog.Path = "testdata/custom.log" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) { @@ -92,17 +92,17 @@ func TestWebLog_Check_ErrorOnCreatingParserEmptyLine(t *testing.T) { weblog.Path = "testdata/custom.log" weblog.Parser.LogType = logs.TypeCSV weblog.Parser.CSV.Format = "$one $two" - require.True(t, weblog.Init()) + require.NoError(t, weblog.Init()) - assert.False(t, weblog.Check()) + assert.Error(t, weblog.Check()) } func TestWebLog_Charts(t *testing.T) { weblog := 
New() defer weblog.Cleanup() weblog.Path = "testdata/common.log" - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) assert.NotNil(t, weblog.Charts()) } @@ -1187,8 +1187,8 @@ func prepareWebLogCollectFull(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testFullLog)) @@ -1230,8 +1230,8 @@ func prepareWebLogCollectCommon(t *testing.T) *WebLog { weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCommonLog)) @@ -1282,8 +1282,8 @@ func prepareWebLogCollectCustom(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomLog)) @@ -1328,8 +1328,8 @@ func prepareWebLogCollectCustomTimeFields(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) @@ -1374,8 +1374,8 @@ func prepareWebLogCollectCustomNumericFields(t *testing.T) *WebLog { } weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testCustomTimeFieldLog)) @@ -1424,8 +1424,8 @@ func prepareWebLogCollectIISFields(t *testing.T) *WebLog { weblog := New() weblog.Config = cfg - require.True(t, weblog.Init()) - require.True(t, weblog.Check()) + require.NoError(t, weblog.Init()) + require.NoError(t, weblog.Check()) defer weblog.Cleanup() p, err := logs.NewCSVParser(weblog.Parser.CSV, bytes.NewReader(testIISLog)) diff --git a/modules/whoisquery/config_schema.json b/modules/whoisquery/config_schema.json index 9f5131789..fb44531c3 100644 --- a/modules/whoisquery/config_schema.json +++ b/modules/whoisquery/config_schema.json @@ -1,29 +1,36 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/whoisquery job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/whoisquery job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "source": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "days_until_expiration_warning": { + "type": "integer" + }, + "days_until_expiration_critical": { + "type": "integer" + } }, - "source": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "days_until_expiration_warning": { - "type": "integer" - }, - "days_until_expiration_critical": { - "type": "integer" - } + "required": [ + "name", + "source" + ] }, - "required": [ - 
"name", - "source" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/whoisquery/provider.go b/modules/whoisquery/provider.go index 71318dd81..032f979f4 100644 --- a/modules/whoisquery/provider.go +++ b/modules/whoisquery/provider.go @@ -23,7 +23,7 @@ type fromNet struct { func newProvider(config Config) (provider, error) { domain := config.Source client := whois.NewClient() - client.SetTimeout(config.Timeout.Duration) + client.SetTimeout(config.Timeout.Duration()) return &fromNet{ domainAddress: domain, diff --git a/modules/whoisquery/whoisquery.go b/modules/whoisquery/whoisquery.go index 6265b4fb6..e803dbf99 100644 --- a/modules/whoisquery/whoisquery.go +++ b/modules/whoisquery/whoisquery.go @@ -4,6 +4,7 @@ package whoisquery import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -26,7 +27,7 @@ func init() { func New() *WhoisQuery { return &WhoisQuery{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), DaysUntilWarn: 90, DaysUntilCrit: 30, }, @@ -49,26 +50,38 @@ type WhoisQuery struct { prov provider } -func (w *WhoisQuery) Init() bool { +func (w *WhoisQuery) Configuration() any { + return w.Config +} + +func (w *WhoisQuery) Init() error { if err := w.validateConfig(); err != nil { w.Errorf("config validation: %v", err) - return false + return err } prov, err := w.initProvider() if err != nil { w.Errorf("init whois provider: %v", err) - return false + return err } w.prov = prov w.charts = w.initCharts() - return true + return nil } -func (w *WhoisQuery) Check() bool { - return len(w.Collect()) > 0 +func (w *WhoisQuery) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *WhoisQuery) Charts() *module.Charts { diff --git a/modules/whoisquery/whoisquery_test.go b/modules/whoisquery/whoisquery_test.go index 1f3c827bd..0fe6c01c8 100644 --- a/modules/whoisquery/whoisquery_test.go +++ b/modules/whoisquery/whoisquery_test.go @@ -17,7 +17,7 @@ func TestWhoisQuery_Cleanup(t *testing.T) { func TestWhoisQuery_Charts(t *testing.T) { whoisquery := New() whoisquery.Source = "example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) assert.NotNil(t, whoisquery.Charts()) } @@ -45,9 +45,9 @@ func TestWhoisQuery_Init(t *testing.T) { whoisquery.Config = test.config if test.err { - assert.False(t, whoisquery.Init()) + assert.Error(t, whoisquery.Init()) } else { - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) var typeOK bool if test.providerType == net { @@ -64,20 +64,20 @@ func TestWhoisQuery_Check(t *testing.T) { whoisquery := New() whoisquery.prov = &mockProvider{remTime: 12345.678} - assert.True(t, whoisquery.Check()) + assert.NoError(t, whoisquery.Check()) } func TestWhoisQuery_Check_ReturnsFalseOnProviderError(t *testing.T) { whoisquery := New() whoisquery.prov = &mockProvider{err: true} - assert.False(t, whoisquery.Check()) + assert.Error(t, whoisquery.Check()) } func TestWhoisQuery_Collect(t *testing.T) { whoisquery := New() whoisquery.Source = "example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) whoisquery.prov = &mockProvider{remTime: 12345} collected := whoisquery.Collect() @@ -96,7 +96,7 @@ func TestWhoisQuery_Collect(t *testing.T) { func TestWhoisQuery_Collect_ReturnsNilOnProviderError(t *testing.T) { whoisquery := New() whoisquery.Source = 
"example.com" - require.True(t, whoisquery.Init()) + require.NoError(t, whoisquery.Init()) whoisquery.prov = &mockProvider{err: true} assert.Nil(t, whoisquery.Collect()) diff --git a/modules/windows/config_schema.json b/modules/windows/config_schema.json index 1668dd905..3c9656bb7 100644 --- a/modules/windows/config_schema.json +++ b/modules/windows/config_schema.json @@ -1,59 +1,244 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/windows job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - }, - "url": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "username": { - "type": "string" - }, - "password": { - "type": "string" - }, - "proxy_url": { - "type": "string" - }, - "proxy_username": { - "type": "string" - }, - "proxy_password": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Windows collector configuration.", + "type": "object", + "properties": { + "configuration_section": { + "title": "Configuration", + "type": "string", + "enum": [ + "base", + "auth", + "tls", + "proxy", + "headers", + "all" + ], + "default": "base" + } }, - "headers": { - "type": "object", - "additionalProperties": { - "type": "string" + "dependencies": { + "configuration_section": { + "oneOf": [ + { + "properties": { + "configuration_section": { + "const": "base" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "auth" + } + }, + "allOf": [ + { + "$ref": "#/definitions/authSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "proxy" + } + }, + "allOf": [ + { + "$ref": "#/definitions/proxySectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "tls" + } + }, + "allOf": [ + { + "$ref": "#/definitions/tlsSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "headers" + } + }, + "allOf": [ + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + }, + { + "properties": { + "configuration_section": { + "const": "all" + } + }, + "allOf": [ + { + "$ref": "#/definitions/baseSectionConfig" + }, + { + "$ref": "#/definitions/authSectionConfig" + }, + { + "$ref": "#/definitions/proxySectionConfig" + }, + { + "$ref": "#/definitions/tlsSectionConfig" + }, + { + "$ref": "#/definitions/headersSectionConfig" + } + ] + } + ] } }, - "not_follow_redirects": { - "type": "boolean" + "definitions": { + "baseSectionConfig": { + "type": "object", + "properties": { + "url": { + "title": "URL", + "description": "The URL of the Windows exporter metrics endpoint.", + "type": "string", + "default": "" + }, + "update_every": { + "title": "Update every", + "description": "The data collection frequency in seconds.", + "type": "integer", + "minimum": 1, + "default": 5 + }, + "timeout": { + "title": "Timeout", + "description": "The timeout in seconds for the HTTP request.", + "type": "number", + "minimum": 0.5, + "default": 5 + }, + "not_follow_redirects": { + "title": "Not follow redirects", + "description": "If set to true, the client will not follow HTTP redirects automatically.", + "type": "boolean" + } + }, + "required": [ + "url" + ] + }, + "authSectionConfig": { + "type": "object", + "properties": { + "username": { + "title": "Username", + "description": "The username for basic authentication (if required).", + "type": "string", + "sensitive": true + }, + "password": { + 
"title": "Password", + "description": "The password for basic authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "proxySectionConfig": { + "type": "object", + "properties": { + "proxy_url": { + "title": "Proxy URL", + "description": "The URL of the proxy server (if required).", + "type": "string" + }, + "proxy_username": { + "title": "Proxy username", + "description": "The username for proxy authentication (if required).", + "type": "string", + "sensitive": true + }, + "proxy_password": { + "title": "Proxy password", + "description": "The password for proxy authentication (if required).", + "type": "string", + "sensitive": true + } + } + }, + "headersSectionConfig": { + "type": "object", + "properties": { + "headers": { + "title": "Headers", + "description": "Additional HTTP headers to include in the request.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, + "tlsSectionConfig": { + "type": "object", + "properties": { + "tls_skip_verify": { + "title": "Skip TLS verification", + "description": "If set to true, TLS certificate verification will be skipped.", + "type": "boolean" + }, + "tls_ca": { + "title": "TLS CA", + "description": "The path to the CA certificate file for TLS verification.", + "type": "string" + }, + "tls_cert": { + "title": "TLS certificate", + "description": "The path to the client certificate file for TLS authentication.", + "type": "string" + }, + "tls_key": { + "title": "TLS key", + "description": "The path to the client key file for TLS authentication.", + "type": "string" + } + } + } + } + }, + "uiSchema": { + "uiOptions": { + "fullPage": true }, - "tls_ca": { - "type": "string" + "configuration_section": { + "ui:widget": "radio", + "ui:options": { + "inline": true + } }, - "tls_cert": { - "type": "string" + "timeout": { + "ui:help": "Accepts decimals for precise control (e.g., type 1.5 for 1.5 seconds)." 
}, - "tls_key": { - "type": "string" + "password": { + "ui:widget": "password" }, - "insecure_skip_verify": { - "type": "boolean" + "proxy_password": { + "ui:widget": "password" } - }, - "required": [ - "name", - "url" - ] + } } diff --git a/modules/windows/init.go b/modules/windows/init.go index 34cf83672..51c3c4266 100644 --- a/modules/windows/init.go +++ b/modules/windows/init.go @@ -4,7 +4,6 @@ package windows import ( "errors" - "net/http" "github.com/netdata/go.d.plugin/pkg/prometheus" "github.com/netdata/go.d.plugin/pkg/web" @@ -17,10 +16,10 @@ func (w *Windows) validateConfig() error { return nil } -func (w *Windows) initHTTPClient() (*http.Client, error) { - return web.NewHTTPClient(w.Client) -} - -func (w *Windows) initPrometheusClient(client *http.Client) (prometheus.Prometheus, error) { +func (w *Windows) initPrometheusClient() (prometheus.Prometheus, error) { + client, err := web.NewHTTPClient(w.Client) + if err != nil { + return nil, err + } return prometheus.New(client, w.Request), nil } diff --git a/modules/windows/windows.go b/modules/windows/windows.go index e405887e0..493fa61a1 100644 --- a/modules/windows/windows.go +++ b/modules/windows/windows.go @@ -4,7 +4,7 @@ package windows import ( _ "embed" - "net/http" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -30,7 +30,7 @@ func New() *Windows { Config: Config{ HTTP: web.HTTP{ Client: web.Client{ - Timeout: web.Duration{Duration: time.Second * 5}, + Timeout: web.Duration(time.Second * 5), }, }, }, @@ -68,20 +68,20 @@ func New() *Windows { } type Config struct { - web.HTTP `yaml:",inline"` + web.HTTP `yaml:",inline" json:",inline"` + UpdateEvery int `yaml:"update_every" json:"update_every"` } type ( Windows struct { module.Base - Config `yaml:",inline"` + Config `yaml:",inline" json:",inline"` charts *module.Charts doCheck bool - httpClient *http.Client - prom prometheus.Prometheus + prom prometheus.Prometheus cache cache } @@ -116,31 +116,36 @@ type ( } ) -func (w *Windows) Init() bool { +func (w *Windows) Configuration() any { + return w.Config +} + +func (w *Windows) Init() error { if err := w.validateConfig(); err != nil { w.Errorf("config validation: %v", err) - return false - } - - httpClient, err := w.initHTTPClient() - if err != nil { - w.Errorf("init HTTP client: %v", err) - return false + return err } - w.httpClient = httpClient - prom, err := w.initPrometheusClient(w.httpClient) + prom, err := w.initPrometheusClient() if err != nil { w.Errorf("init prometheus clients: %v", err) - return false + return err } w.prom = prom - return true + return nil } -func (w *Windows) Check() bool { - return len(w.Collect()) > 0 +func (w *Windows) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *Windows) Charts() *module.Charts { @@ -160,7 +165,7 @@ func (w *Windows) Collect() map[string]int64 { } func (w *Windows) Cleanup() { - if w.httpClient != nil { - w.httpClient.CloseIdleConnections() + if w.prom != nil && w.prom.HTTPClient() != nil { + w.prom.HTTPClient().CloseIdleConnections() } } diff --git a/modules/windows/windows_test.go b/modules/windows/windows_test.go index b98e40de6..8e837a56b 100644 --- a/modules/windows/windows_test.go +++ b/modules/windows/windows_test.go @@ -57,9 +57,9 @@ func TestWindows_Init(t *testing.T) { win.Config = test.config if test.wantFail { - assert.False(t, win.Init()) + assert.Error(t, win.Init()) } else { - assert.True(t, win.Init()) + 
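One detail of the windows.go diff above that recurs across the patch: the module no longer stores its own *http.Client; the client lives inside the prometheus scraper, so Cleanup reaches it through HTTPClient(). A sketch of that teardown shape, limited to the calls visible in this patch:

package example

import "github.com/netdata/go.d.plugin/pkg/prometheus"

type collector struct {
	prom prometheus.Prometheus // the scraper wraps the *http.Client it was built with
}

// Cleanup closes idle connections owned by the client inside the prometheus scraper,
// mirroring the Cleanup bodies added in this patch.
func (c *collector) Cleanup() {
	if c.prom != nil && c.prom.HTTPClient() != nil {
		c.prom.HTTPClient().CloseIdleConnections()
	}
}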
assert.NoError(t, win.Init()) } }) } @@ -92,12 +92,12 @@ func TestWindows_Check(t *testing.T) { win, cleanup := test.prepare() defer cleanup() - require.True(t, win.Init()) + require.NoError(t, win.Init()) if test.wantFail { - assert.False(t, win.Check()) + assert.Error(t, win.Check()) } else { - assert.True(t, win.Check()) + assert.NoError(t, win.Check()) } }) } @@ -789,7 +789,7 @@ func TestWindows_Collect(t *testing.T) { win, cleanup := test.prepare() defer cleanup() - require.True(t, win.Init()) + require.NoError(t, win.Init()) mx := win.Collect() diff --git a/modules/wireguard/config_schema.json b/modules/wireguard/config_schema.json index c6d6c261f..70dd0f6be 100644 --- a/modules/wireguard/config_schema.json +++ b/modules/wireguard/config_schema.json @@ -1,13 +1,20 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "title": "go.d/wireguard job configuration schema.", - "type": "object", - "properties": { - "name": { - "type": "string" - } + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "go.d/wireguard job configuration schema.", + "type": "object", + "properties": { + "name": { + "type": "string" + } + }, + "required": [ + "name" + ] }, - "required": [ - "name" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/wireguard/wireguard.go b/modules/wireguard/wireguard.go index 6587dce3c..7114baae5 100644 --- a/modules/wireguard/wireguard.go +++ b/modules/wireguard/wireguard.go @@ -4,6 +4,7 @@ package wireguard import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/agent/module" @@ -32,9 +33,14 @@ func New() *WireGuard { } } +type Config struct { + UpdateEvery int `yaml:"update_every" json:"update_every"` +} + type ( WireGuard struct { module.Base + Config `yaml:",inline"` charts *module.Charts @@ -53,12 +59,24 @@ type ( } ) -func (w *WireGuard) Init() bool { - return true +func (w *WireGuard) Configuration() any { + return w.Config } -func (w *WireGuard) Check() bool { - return len(w.Collect()) > 0 +func (w *WireGuard) Init() error { + return nil +} + +func (w *WireGuard) Check() error { + mx, err := w.collect() + if err != nil { + w.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (w *WireGuard) Charts() *module.Charts { diff --git a/modules/wireguard/wireguard_test.go b/modules/wireguard/wireguard_test.go index 5e6434dcc..9be84824d 100644 --- a/modules/wireguard/wireguard_test.go +++ b/modules/wireguard/wireguard_test.go @@ -17,7 +17,7 @@ import ( ) func TestWireGuard_Init(t *testing.T) { - assert.True(t, New().Init()) + assert.NoError(t, New().Init()) } func TestWireGuard_Charts(t *testing.T) { @@ -114,13 +114,13 @@ func TestWireGuard_Check(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { w := New() - require.True(t, w.Init()) + require.NoError(t, w.Init()) test.prepare(w) if test.wantFail { - assert.False(t, w.Check()) + assert.Error(t, w.Check()) } else { - assert.True(t, w.Check()) + assert.NoError(t, w.Check()) } }) } @@ -411,7 +411,7 @@ func TestWireGuard_Collect(t *testing.T) { for name, test := range tests { t.Run(name, func(t *testing.T) { w := New() - require.True(t, w.Init()) + require.NoError(t, w.Init()) m := &mockClient{} w.client = m diff --git a/modules/x509check/config_schema.json b/modules/x509check/config_schema.json index 5194715ae..a7afbde66 100644 --- a/modules/x509check/config_schema.json +++ b/modules/x509check/config_schema.json @@ -1,54 +1,61 @@ { - "$schema": 
"http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/x509check job configuration schema.", - "properties": { - "name": { - "type": "string" - }, - "source": { - "type": "string" - }, - "timeout": { - "type": [ - "string", - "integer" - ] - }, - "tlscfg": { - "type": "object", - "properties": { - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/x509check job configuration schema.", + "properties": { + "name": { + "type": "string" + }, + "source": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "tlscfg": { + "type": "object", + "properties": { + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "tls_skip_verify": { + "type": "boolean" + } }, - "tls_skip_verify": { - "type": "boolean" - } + "required": [ + "tls_ca", + "tls_cert", + "tls_key" + ] }, - "required": [ - "tls_ca", - "tls_cert", - "tls_key" - ] - }, - "days_until_expiration_warning": { - "type": "integer" - }, - "days_until_expiration_critical": { - "type": "integer" + "days_until_expiration_warning": { + "type": "integer" + }, + "days_until_expiration_critical": { + "type": "integer" + }, + "check_revocation_status": { + "type": "boolean" + } }, - "check_revocation_status": { - "type": "boolean" - } + "required": [ + "name", + "source" + ] }, - "required": [ - "name", - "source" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/x509check/provider.go b/modules/x509check/provider.go index c5ac4d711..86d10176c 100644 --- a/modules/x509check/provider.go +++ b/modules/x509check/provider.go @@ -59,10 +59,10 @@ func newProvider(config Config) (provider, error) { if sourceURL.Scheme == "https" { sourceURL.Scheme = "tcp" } - return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil + return &fromNet{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil case "smtp": sourceURL.Scheme = "tcp" - return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration}, nil + return &fromSMTP{url: sourceURL, tlsConfig: tlsCfg, timeout: config.Timeout.Duration()}, nil default: return nil, fmt.Errorf("unsupported scheme '%s'", sourceURL) } diff --git a/modules/x509check/x509check.go b/modules/x509check/x509check.go index ed3a10b2f..89b93a265 100644 --- a/modules/x509check/x509check.go +++ b/modules/x509check/x509check.go @@ -4,6 +4,7 @@ package x509check import ( _ "embed" + "errors" "time" "github.com/netdata/go.d.plugin/pkg/tlscfg" @@ -30,7 +31,7 @@ func init() { func New() *X509Check { return &X509Check{ Config: Config{ - Timeout: web.Duration{Duration: time.Second * 2}, + Timeout: web.Duration(time.Second * 2), DaysUntilWarn: 14, DaysUntilCritical: 7, }, @@ -53,26 +54,38 @@ type X509Check struct { prov provider } -func (x *X509Check) Init() bool { +func (x *X509Check) Configuration() any { + return x.Config +} + +func (x *X509Check) Init() error { if err := x.validateConfig(); err != nil { x.Errorf("config validation: %v", err) - return false + return err } prov, err := x.initProvider() if err != nil { x.Errorf("certificate provider init: %v", err) - return false + return err } x.prov = prov x.charts = x.initCharts() - return true + return nil } -func (x *X509Check) Check() bool { - return len(x.Collect()) > 0 +func (x *X509Check) Check() 
error { + mx, err := x.collect() + if err != nil { + x.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } func (x *X509Check) Charts() *module.Charts { diff --git a/modules/x509check/x509check_test.go b/modules/x509check/x509check_test.go index 2c628af0a..bb82bd616 100644 --- a/modules/x509check/x509check_test.go +++ b/modules/x509check/x509check_test.go @@ -20,7 +20,7 @@ func TestX509Check_Cleanup(t *testing.T) { func TestX509Check_Charts(t *testing.T) { x509Check := New() x509Check.Source = "https://example.com" - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) assert.NotNil(t, x509Check.Charts()) } @@ -70,9 +70,9 @@ func TestX509Check_Init(t *testing.T) { x509Check.Config = test.config if test.err { - assert.False(t, x509Check.Init()) + assert.Error(t, x509Check.Init()) } else { - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) var typeOK bool switch test.providerType { @@ -94,20 +94,20 @@ func TestX509Check_Check(t *testing.T) { x509Check := New() x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} - assert.True(t, x509Check.Check()) + assert.NoError(t, x509Check.Check()) } func TestX509Check_Check_ReturnsFalseOnProviderError(t *testing.T) { x509Check := New() x509Check.prov = &mockProvider{err: true} - assert.False(t, x509Check.Check()) + assert.Error(t, x509Check.Check()) } func TestX509Check_Collect(t *testing.T) { x509Check := New() x509Check.Source = "https://example.com" - require.True(t, x509Check.Init()) + require.NoError(t, x509Check.Init()) x509Check.prov = &mockProvider{certs: []*x509.Certificate{{}}} collected := x509Check.Collect() diff --git a/modules/zookeeper/collect.go b/modules/zookeeper/collect.go index 97d6f3e6c..86491e1b1 100644 --- a/modules/zookeeper/collect.go +++ b/modules/zookeeper/collect.go @@ -14,10 +14,12 @@ func (z *Zookeeper) collect() (map[string]int64, error) { func (z *Zookeeper) collectMntr() (map[string]int64, error) { const command = "mntr" + lines, err := z.fetch("mntr") if err != nil { return nil, err } + switch len(lines) { case 0: return nil, fmt.Errorf("'%s' command returned empty response", command) @@ -27,6 +29,7 @@ func (z *Zookeeper) collectMntr() (map[string]int64, error) { } mx := make(map[string]int64) + for _, line := range lines { parts := strings.Fields(line) if len(parts) != 2 || !strings.HasPrefix(parts[0], "zk_") { @@ -56,6 +59,7 @@ func (z *Zookeeper) collectMntr() (map[string]int64, error) { if len(mx) == 0 { return nil, fmt.Errorf("'%s' command: failed to parse response", command) } + return mx, nil } diff --git a/modules/zookeeper/config_schema.json b/modules/zookeeper/config_schema.json index 259987aba..46f496200 100644 --- a/modules/zookeeper/config_schema.json +++ b/modules/zookeeper/config_schema.json @@ -1,38 +1,41 @@ { - "$schema": "http://json-schema.org/draft-07/schema#", - "type": "object", - "title": "go.d/zookeeper job configuration schema.", - "properties": { - "name": { - "type": "string" + "jsonSchema": { + "$schema": "http://json-schema.org/draft-07/schema#", + "type": "object", + "title": "go.d/zookeeper job configuration schema.", + "properties": { + "address": { + "type": "string" + }, + "timeout": { + "type": [ + "string", + "integer" + ] + }, + "use_tls": { + "type": "boolean" + }, + "tls_ca": { + "type": "string" + }, + "tls_cert": { + "type": "string" + }, + "tls_key": { + "type": "string" + }, + "insecure_skip_verify": { + "type": "boolean" + } }, - "address": { - "type": "string" - }, 
- "timeout": { - "type": [ - "string", - "integer" - ] - }, - "use_tls": { - "type": "boolean" - }, - "tls_ca": { - "type": "string" - }, - "tls_cert": { - "type": "string" - }, - "tls_key": { - "type": "string" - }, - "insecure_skip_verify": { - "type": "boolean" - } + "required": [ + "address" + ] }, - "required": [ - "name", - "address" - ] + "uiSchema": { + "uiOptions": { + "fullPage": true + } + } } diff --git a/modules/zookeeper/fetcher.go b/modules/zookeeper/fetcher.go index 7c3aae0ea..cd9eed90d 100644 --- a/modules/zookeeper/fetcher.go +++ b/modules/zookeeper/fetcher.go @@ -39,9 +39,12 @@ func (c *zookeeperFetcher) fetch(command string) (rows []string, err error) { if err != nil { return nil, err } + return rows, nil } +func (c *zookeeperFetcher) disconnect() {} + func isZKLine(line []byte) bool { return bytes.HasPrefix(line, []byte("zk_")) } diff --git a/modules/zookeeper/init.go b/modules/zookeeper/init.go new file mode 100644 index 000000000..d865a0949 --- /dev/null +++ b/modules/zookeeper/init.go @@ -0,0 +1,41 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +package zookeeper + +import ( + "crypto/tls" + "errors" + "fmt" + + "github.com/netdata/go.d.plugin/pkg/socket" + "github.com/netdata/go.d.plugin/pkg/tlscfg" +) + +func (z *Zookeeper) verifyConfig() error { + if z.Address == "" { + return errors.New("address not set") + } + return nil +} + +func (z *Zookeeper) initZookeeperFetcher() (fetcher, error) { + var tlsConf *tls.Config + var err error + + if z.UseTLS { + tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig) + if err != nil { + return nil, fmt.Errorf("creating tls config : %v", err) + } + } + + sock := socket.New(socket.Config{ + Address: z.Address, + ConnectTimeout: z.Timeout.Duration(), + ReadTimeout: z.Timeout.Duration(), + WriteTimeout: z.Timeout.Duration(), + TLSConf: tlsConf, + }) + + return &zookeeperFetcher{Client: sock}, nil +} diff --git a/modules/zookeeper/zookeeper.go b/modules/zookeeper/zookeeper.go index 29ab1f858..f96933831 100644 --- a/modules/zookeeper/zookeeper.go +++ b/modules/zookeeper/zookeeper.go @@ -3,12 +3,10 @@ package zookeeper import ( - "crypto/tls" _ "embed" - "fmt" + "errors" "time" - "github.com/netdata/go.d.plugin/pkg/socket" "github.com/netdata/go.d.plugin/pkg/tlscfg" "github.com/netdata/go.d.plugin/pkg/web" @@ -25,6 +23,16 @@ func init() { }) } +// New creates Zookeeper with default values. +func New() *Zookeeper { + return &Zookeeper{ + Config: Config{ + Address: "127.0.0.1:2181", + Timeout: web.Duration(time.Second), + UseTLS: false, + }} +} + // Config is the Zookeeper module configuration. type Config struct { Address string @@ -33,68 +41,55 @@ type Config struct { tlscfg.TLSConfig `yaml:",inline"` } -// New creates Zookeeper with default values. -func New() *Zookeeper { - config := Config{ - Address: "127.0.0.1:2181", - Timeout: web.Duration{Duration: time.Second}, - UseTLS: false, - } - return &Zookeeper{Config: config} -} - -type fetcher interface { - fetch(command string) ([]string, error) -} - // Zookeeper Zookeeper module. -type Zookeeper struct { - module.Base - fetcher - Config `yaml:",inline"` -} +type ( + Zookeeper struct { + module.Base + Config `yaml:",inline"` -// Cleanup makes cleanup. 
-func (Zookeeper) Cleanup() {} - -func (z *Zookeeper) createZookeeperFetcher() (err error) { - var tlsConf *tls.Config - if z.UseTLS { - tlsConf, err = tlscfg.NewTLSConfig(z.TLSConfig) - if err != nil { - return fmt.Errorf("error on creating tls config : %v", err) - } + fetcher + } + fetcher interface { + fetch(command string) ([]string, error) } +) - sock := socket.New(socket.Config{ - Address: z.Address, - ConnectTimeout: z.Timeout.Duration, - ReadTimeout: z.Timeout.Duration, - WriteTimeout: z.Timeout.Duration, - TLSConf: tlsConf, - }) - z.fetcher = &zookeeperFetcher{Client: sock} - return nil +func (z *Zookeeper) Configuration() any { + return z.Config } // Init makes initialization. -func (z *Zookeeper) Init() bool { - err := z.createZookeeperFetcher() +func (z *Zookeeper) Init() error { + if err := z.verifyConfig(); err != nil { + z.Error(err) + return err + } + + f, err := z.initZookeeperFetcher() if err != nil { z.Error(err) - return false + return err } + z.fetcher = f - return true + return nil } // Check makes check. -func (z *Zookeeper) Check() bool { - return len(z.Collect()) > 0 +func (z *Zookeeper) Check() error { + mx, err := z.collect() + if err != nil { + z.Error(err) + return err + } + if len(mx) == 0 { + return errors.New("no metrics collected") + } + return nil } // Charts creates Charts. -func (Zookeeper) Charts() *Charts { +func (z *Zookeeper) Charts() *Charts { return charts.Copy() } @@ -110,3 +105,6 @@ func (z *Zookeeper) Collect() map[string]int64 { } return mx } + +// Cleanup makes cleanup. +func (z *Zookeeper) Cleanup() {} diff --git a/modules/zookeeper/zookeeper_test.go b/modules/zookeeper/zookeeper_test.go index 13f3632c2..8aaac2ed1 100644 --- a/modules/zookeeper/zookeeper_test.go +++ b/modules/zookeeper/zookeeper_test.go @@ -32,7 +32,7 @@ func TestNew(t *testing.T) { func TestZookeeper_Init(t *testing.T) { job := New() - assert.True(t, job.Init()) + assert.NoError(t, job.Init()) assert.NotNil(t, job.fetcher) } @@ -41,23 +41,23 @@ func TestZookeeper_InitErrorOnCreatingTLSConfig(t *testing.T) { job.UseTLS = true job.TLSConfig.TLSCA = "testdata/tls" - assert.False(t, job.Init()) + assert.Error(t, job.Init()) } func TestZookeeper_Check(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrData} - assert.True(t, job.Check()) + assert.NoError(t, job.Check()) } func TestZookeeper_CheckErrorOnFetch(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{err: true} - assert.False(t, job.Check()) + assert.Error(t, job.Check()) } func TestZookeeper_Charts(t *testing.T) { @@ -70,7 +70,7 @@ func TestZookeeper_Cleanup(t *testing.T) { func TestZookeeper_Collect(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrData} expected := map[string]int64{ @@ -98,7 +98,7 @@ func TestZookeeper_Collect(t *testing.T) { func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: testMntrNotInWhiteListData} assert.Nil(t, job.Collect()) @@ -106,7 +106,7 @@ func TestZookeeper_CollectMntrNotInWhiteList(t *testing.T) { func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{} assert.Nil(t, job.Collect()) @@ -114,7 
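The zookeeper.go restructuring above hides the socket behind a small fetcher interface, which is exactly what lets zookeeper_test.go substitute a mock for a live server. A minimal sketch of that seam; the mock here is illustrative, not the mockZookeeperFetcher from the test file:

package example

import (
	"errors"
	"fmt"
)

// fetcher is the seam the module depends on: one command in, raw response lines out.
type fetcher interface {
	fetch(command string) ([]string, error)
}

// mockFetcher satisfies fetcher with canned data, standing in for a real socket client.
type mockFetcher struct {
	lines []string
	fail  bool
}

func (m mockFetcher) fetch(string) ([]string, error) {
	if m.fail {
		return nil, errors.New("mock fetch error")
	}
	return m.lines, nil
}

func main() {
	var f fetcher = mockFetcher{lines: []string{"zk_avg_latency 0"}}
	lines, err := f.fetch("mntr")
	fmt.Println(lines, err) // [zk_avg_latency 0] <nil>
}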
+114,7 @@ func TestZookeeper_CollectMntrEmptyResponse(t *testing.T) { func TestZookeeper_CollectMntrInvalidData(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{data: []byte("hello \nand good buy\n")} assert.Nil(t, job.Collect()) @@ -122,7 +122,7 @@ func TestZookeeper_CollectMntrInvalidData(t *testing.T) { func TestZookeeper_CollectMntrReceiveError(t *testing.T) { job := New() - require.True(t, job.Init()) + require.NoError(t, job.Init()) job.fetcher = &mockZookeeperFetcher{err: true} assert.Nil(t, job.Collect()) diff --git a/pkg/logs/csv.go b/pkg/logs/csv.go index 3a7610a70..0b7d90009 100644 --- a/pkg/logs/csv.go +++ b/pkg/logs/csv.go @@ -14,11 +14,11 @@ import ( type ( CSVConfig struct { - FieldsPerRecord int `yaml:"fields_per_record"` - Delimiter string `yaml:"delimiter"` - TrimLeadingSpace bool `yaml:"trim_leading_space"` - Format string `yaml:"format"` - CheckField func(string) (string, int, bool) `yaml:"-"` + FieldsPerRecord int `yaml:"fields_per_record" json:"fields_per_record"` + Delimiter string `yaml:"delimiter" json:"delimiter"` + TrimLeadingSpace bool `yaml:"trim_leading_space" json:"trim_leading_space"` + Format string `yaml:"format" json:"format"` + CheckField func(string) (string, int, bool) `yaml:"-" json:"-"` } CSVParser struct { diff --git a/pkg/logs/json.go b/pkg/logs/json.go index cfd6c83e7..ceb32e272 100644 --- a/pkg/logs/json.go +++ b/pkg/logs/json.go @@ -12,7 +12,7 @@ import ( ) type JSONConfig struct { - Mapping map[string]string `yaml:"mapping"` + Mapping map[string]string `yaml:"mapping" json:"mapping"` } type JSONParser struct { diff --git a/pkg/logs/ltsv.go b/pkg/logs/ltsv.go index 558f9e076..b7fbceb14 100644 --- a/pkg/logs/ltsv.go +++ b/pkg/logs/ltsv.go @@ -15,9 +15,9 @@ import ( type ( LTSVConfig struct { - FieldDelimiter string `yaml:"field_delimiter"` - ValueDelimiter string `yaml:"value_delimiter"` - Mapping map[string]string `yaml:"mapping"` + FieldDelimiter string `yaml:"field_delimiter" json:"field_delimiter"` + ValueDelimiter string `yaml:"value_delimiter" json:"value_delimiter"` + Mapping map[string]string `yaml:"mapping" json:"mapping"` } LTSVParser struct { diff --git a/pkg/logs/parser.go b/pkg/logs/parser.go index f1807283a..d83b4309d 100644 --- a/pkg/logs/parser.go +++ b/pkg/logs/parser.go @@ -40,11 +40,11 @@ const ( ) type ParserConfig struct { - LogType string `yaml:"log_type"` - CSV CSVConfig `yaml:"csv_config"` - LTSV LTSVConfig `yaml:"ltsv_config"` - RegExp RegExpConfig `yaml:"regexp_config"` - JSON JSONConfig `yaml:"json_config"` + LogType string `yaml:"log_type" json:"log_type"` + CSV CSVConfig `yaml:"csv_config" json:"csv_config"` + LTSV LTSVConfig `yaml:"ltsv_config" json:"ltsv_config"` + RegExp RegExpConfig `yaml:"regexp_config" json:"regexp_config"` + JSON JSONConfig `yaml:"json_config" json:"json_config"` } func NewParser(config ParserConfig, in io.Reader) (Parser, error) { diff --git a/pkg/logs/regexp.go b/pkg/logs/regexp.go index 84b725fd9..e0dee1d02 100644 --- a/pkg/logs/regexp.go +++ b/pkg/logs/regexp.go @@ -12,7 +12,7 @@ import ( type ( RegExpConfig struct { - Pattern string `yaml:"pattern"` + Pattern string `yaml:"pattern" json:"pattern"` } RegExpParser struct { diff --git a/pkg/matcher/glob.go b/pkg/matcher/glob.go index f8cd5b072..726c94c45 100644 --- a/pkg/matcher/glob.go +++ b/pkg/matcher/glob.go @@ -3,11 +3,10 @@ package matcher import ( + "errors" "path/filepath" "regexp" "unicode/utf8" - - "errors" ) // globMatcher implements Matcher, it uses 
diff --git a/pkg/tlscfg/config.go b/pkg/tlscfg/config.go
index 26051e486..60e152e0f 100644
--- a/pkg/tlscfg/config.go
+++ b/pkg/tlscfg/config.go
@@ -12,16 +12,16 @@ import (
 // TLSConfig represents the standard client TLS configuration.
 type TLSConfig struct {
     // TLSCA specifies the certificate authority to use when verifying server certificates.
-    TLSCA string `yaml:"tls_ca"`
+    TLSCA string `yaml:"tls_ca" json:"tls_ca"`
 
     // TLSCert specifies tls certificate file.
-    TLSCert string `yaml:"tls_cert"`
+    TLSCert string `yaml:"tls_cert" json:"tls_cert"`
 
     // TLSKey specifies tls key file.
-    TLSKey string `yaml:"tls_key"`
+    TLSKey string `yaml:"tls_key" json:"tls_key"`
 
     // InsecureSkipVerify controls whether a client verifies the server's certificate chain and host name.
-    InsecureSkipVerify bool `yaml:"tls_skip_verify"`
+    InsecureSkipVerify bool `yaml:"tls_skip_verify" json:"tls_skip_verify"`
 }
 
 // NewTLSConfig creates a tls.Config, may be nil without an error if TLS is not configured.
diff --git a/pkg/web/client.go b/pkg/web/client.go
index ae3ecd462..eb0e3c30c 100644
--- a/pkg/web/client.go
+++ b/pkg/web/client.go
@@ -21,18 +21,18 @@ var ErrRedirectAttempted = errors.New("redirect")
 type Client struct {
     // Timeout specifies a time limit for requests made by this Client.
     // Default (zero value) is no timeout. Must be set before http.Client creation.
-    Timeout Duration `yaml:"timeout"`
+    Timeout Duration `yaml:"timeout" json:"timeout"`
 
     // NotFollowRedirect specifies the policy for handling redirects.
     // Default (zero value) is std http package default policy (stop after 10 consecutive requests).
-    NotFollowRedirect bool `yaml:"not_follow_redirects"`
+    NotFollowRedirect bool `yaml:"not_follow_redirects" json:"not_follow_redirects"`
 
     // ProxyURL specifies the URL of the proxy to use. An empty string means use the environment variables
     // HTTP_PROXY, HTTPS_PROXY and NO_PROXY (or the lowercase versions thereof) to get the URL.
-    ProxyURL string `yaml:"proxy_url"`
+    ProxyURL string `yaml:"proxy_url" json:"proxy_url"`
 
     // TLSConfig specifies the TLS configuration.
-    tlscfg.TLSConfig `yaml:",inline"`
+    tlscfg.TLSConfig `yaml:",inline" json:",inline"`
 }
 
 // NewHTTPClient returns a new *http.Client given a Client configuration and an error if any.
@@ -48,17 +48,17 @@ func NewHTTPClient(cfg Client) (*http.Client, error) {
         }
     }
 
-    d := &net.Dialer{Timeout: cfg.Timeout.Duration}
+    d := &net.Dialer{Timeout: cfg.Timeout.Duration()}
 
     transport := &http.Transport{
         Proxy:               proxyFunc(cfg.ProxyURL),
         TLSClientConfig:     tlsConfig,
         DialContext:         d.DialContext,
-        TLSHandshakeTimeout: cfg.Timeout.Duration,
+        TLSHandshakeTimeout: cfg.Timeout.Duration(),
     }
 
     return &http.Client{
-        Timeout:       cfg.Timeout.Duration,
+        Timeout:       cfg.Timeout.Duration(),
         Transport:     transport,
         CheckRedirect: redirectFunc(cfg.NotFollowRedirect),
     }, nil
diff --git a/pkg/web/client_test.go b/pkg/web/client_test.go
index e11d6ce47..ead1486c3 100644
--- a/pkg/web/client_test.go
+++ b/pkg/web/client_test.go
@@ -12,7 +12,7 @@ import (
 
 func TestNewHTTPClient(t *testing.T) {
     client, _ := NewHTTPClient(Client{
-        Timeout:           Duration{Duration: time.Second * 5},
+        Timeout:           Duration(time.Second * 5),
         NotFollowRedirect: true,
         ProxyURL:          "http://127.0.0.1:3128",
     })
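A minimal sketch (illustration only, not part of the patch) of why the client code above now calls cfg.Timeout.Duration(): with Duration redefined as a named time.Duration, the accessor converts back to the standard type wherever net/http expects one. The types below are local stand-ins, not the real pkg/web package.

package main

import (
	"fmt"
	"net"
	"net/http"
	"time"
)

// Duration mirrors the reworked pkg/web type: a named time.Duration
// with an accessor instead of a wrapper struct.
type Duration time.Duration

func (d Duration) Duration() time.Duration { return time.Duration(d) }

func main() {
	timeout := Duration(5 * time.Second)

	// The accessor is needed wherever the standard library wants a plain
	// time.Duration, matching the cfg.Timeout.Duration() calls in the diff above.
	d := &net.Dialer{Timeout: timeout.Duration()}
	client := &http.Client{
		Timeout: timeout.Duration(),
		Transport: &http.Transport{
			DialContext:         d.DialContext,
			TLSHandshakeTimeout: timeout.Duration(),
		},
	}

	fmt.Println(client.Timeout) // 5s
}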
diff --git a/pkg/web/duration.go b/pkg/web/duration.go
index ced991f91..85d5ef650 100644
--- a/pkg/web/duration.go
+++ b/pkg/web/duration.go
@@ -3,17 +3,22 @@ package web
 
 import (
+    "encoding/json"
     "fmt"
     "strconv"
     "time"
 )
 
-// Duration is a time.Duration wrapper.
-type Duration struct {
-    Duration time.Duration
+type Duration time.Duration
+
+func (d Duration) Duration() time.Duration {
+    return time.Duration(d)
+}
+
+func (d Duration) String() string {
+    return d.Duration().String()
 }
 
-// UnmarshalYAML implements yaml.Unmarshaler.
 func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
     var s string
 
@@ -22,18 +27,49 @@ func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error {
     }
 
     if v, err := time.ParseDuration(s); err == nil {
-        d.Duration = v
+        *d = Duration(v)
         return nil
     }
     if v, err := strconv.ParseInt(s, 10, 64); err == nil {
-        d.Duration = time.Duration(v) * time.Second
+        *d = Duration(time.Duration(v) * time.Second)
         return nil
     }
     if v, err := strconv.ParseFloat(s, 64); err == nil {
-        d.Duration = time.Duration(v) * time.Second
+        *d = Duration(v * float64(time.Second))
         return nil
     }
+
     return fmt.Errorf("unparsable duration format '%s'", s)
 }
 
-func (d Duration) String() string { return d.Duration.String() }
+func (d Duration) MarshalYAML() (any, error) {
+    seconds := float64(d) / float64(time.Second)
+    return seconds, nil
+}
+
+func (d *Duration) UnmarshalJSON(b []byte) error {
+    s := string(b)
+    if unquoted, err := strconv.Unquote(s); err == nil {
+        s = unquoted
+    }
+
+    if v, err := time.ParseDuration(s); err == nil {
+        *d = Duration(v)
+        return nil
+    }
+    if v, err := strconv.ParseInt(s, 10, 64); err == nil {
+        *d = Duration(time.Duration(v) * time.Second)
+        return nil
+    }
+    if v, err := strconv.ParseFloat(s, 64); err == nil {
+        *d = Duration(v * float64(time.Second))
+        return nil
+    }
+
+    return fmt.Errorf("unparsable duration format '%s'", s)
+}
+
+func (d Duration) MarshalJSON() ([]byte, error) {
+    seconds := float64(d) / float64(time.Second)
+    return json.Marshal(seconds)
+}
diff --git a/pkg/web/duration_test.go b/pkg/web/duration_test.go
index 01ee19dd2..b45063f13 100644
--- a/pkg/web/duration_test.go
+++ b/pkg/web/duration_test.go
@@ -3,22 +3,112 @@ package web
 import (
+    "encoding/json"
+    "fmt"
+    "strings"
     "testing"
+    "time"
 
     "github.com/stretchr/testify/assert"
+    "github.com/stretchr/testify/require"
+
     "gopkg.in/yaml.v2"
 )
 
+func TestDuration_MarshalYAML(t *testing.T) {
+    tests := map[string]struct {
+        d    Duration
+        want string
+    }{
+        "1 second":    {d: Duration(time.Second), want: "1"},
+        "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            bs, err := yaml.Marshal(&test.d)
+            require.NoError(t, err)
+
+            assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+        })
+    }
+}
+
+func TestDuration_MarshalJSON(t *testing.T) {
+    tests := map[string]struct {
+        d    Duration
+        want string
+    }{
+        "1 second":    {d: Duration(time.Second), want: "1"},
+        "1.5 seconds": {d: Duration(time.Second + time.Millisecond*500), want: "1.5"},
+    }
+
+    for name, test := range tests {
+        t.Run(name, func(t *testing.T) {
+            bs, err := json.Marshal(&test.d)
+            require.NoError(t, err)
+
+            assert.Equal(t, test.want, strings.TrimSpace(string(bs)))
+        })
+    }
+}
+
 func TestDuration_UnmarshalYAML(t *testing.T) {
-    var d Duration
-    values := [][]byte{
-        []byte("100ms"),   // duration
-        []byte("3s300ms"), // duration
-        []byte("3"),       // int
-        []byte("3.3"),     // float
+    tests := map[string]struct {
+        input any
+    }{
+        "duration":     {input: "300ms"},
+        "string int":   {input: "1"},
+        "string float": {input: "1.1"},
+        "int":          {input: 2},
+        "float":        {input: 2.2},
     }
 
-    for _, v := range values {
-        assert.NoError(t, yaml.Unmarshal(v, &d))
+    var zero Duration
+
+    for name, test := range tests {
+        name = fmt.Sprintf("%s (%v)", name, test.input)
+        t.Run(name, func(t *testing.T) {
+            data, err := yaml.Marshal(test.input)
+            require.NoError(t, err)
+
+            var d Duration
+            require.NoError(t, yaml.Unmarshal(data, &d))
+            assert.NotEqual(t, zero.String(), d.String())
+        })
+    }
+}
+
+func TestDuration_UnmarshalJSON(t *testing.T) {
+    tests := map[string]struct {
+        input any
+    }{
+        "duration":     {input: "300ms"},
+        "string int":   {input: "1"},
+        "string float": {input: "1.1"},
+        "int":          {input: 2},
+        "float":        {input: 2.2},
+    }
+
+    var zero Duration
+
+    type duration struct {
+        D Duration `json:"d"`
+    }
+    type input struct {
+        D any `json:"d"`
+    }
+
+    for name, test := range tests {
+        name = fmt.Sprintf("%s (%v)", name, test.input)
+        t.Run(name, func(t *testing.T) {
+            in := input{D: test.input}
+            data, err := json.Marshal(in)
+            require.NoError(t, err)
+
+            var d duration
+            require.NoError(t, json.Unmarshal(data, &d))
+            assert.NotEqual(t, zero.String(), d.D.String())
+        })
     }
 }
diff --git a/pkg/web/request.go b/pkg/web/request.go
index 5740da6d1..3db08f734 100644
--- a/pkg/web/request.go
+++ b/pkg/web/request.go
@@ -14,30 +14,30 @@ import (
 // Supported configuration file formats: YAML.
 type Request struct {
     // URL specifies the URL to access.
-    URL string `yaml:"url"`
+    URL string `yaml:"url" json:"url"`
 
     // Body specifies the HTTP request body to be sent by the client.
-    Body string `yaml:"body"`
+    Body string `yaml:"body" json:"body"`
 
     // Method specifies the HTTP method (GET, POST, PUT, etc.). An empty string means GET.
-    Method string `yaml:"method"`
+    Method string `yaml:"method" json:"method"`
 
     // Headers specifies the HTTP request header fields to be sent by the client.
-    Headers map[string]string `yaml:"headers"`
+    Headers map[string]string `yaml:"headers" json:"headers"`
 
     // Username specifies the username for basic HTTP authentication.
-    Username string `yaml:"username"`
+    Username string `yaml:"username" json:"username"`
 
     // Password specifies the password for basic HTTP authentication.
-    Password string `yaml:"password"`
+    Password string `yaml:"password" json:"password"`
 
     // ProxyUsername specifies the username for basic HTTP authentication.
     // It is used to authenticate a user agent to a proxy server.
-    ProxyUsername string `yaml:"proxy_username"`
+    ProxyUsername string `yaml:"proxy_username" json:"proxy_username"`
 
     // ProxyPassword specifies the password for basic HTTP authentication.
     // It is used to authenticate a user agent to a proxy server.
-    ProxyPassword string `yaml:"proxy_password"`
+    ProxyPassword string `yaml:"proxy_password" json:"proxy_password"`
 }
 
 // Copy makes a full copy of the Request.
diff --git a/pkg/web/web.go b/pkg/web/web.go
index e2a7098ba..07cef4839 100644
--- a/pkg/web/web.go
+++ b/pkg/web/web.go
@@ -6,6 +6,6 @@ package web
 // This structure intended to be part of the module configuration.
 // Supported configuration file formats: YAML.
 type HTTP struct {
-    Request `yaml:",inline"`
-    Client  `yaml:",inline"`
+    Request `yaml:",inline" json:",inline"`
+    Client  `yaml:",inline" json:",inline"`
 }
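A minimal sketch (illustration only, not part of the patch) of the Duration (un)marshalling behaviour introduced above: values marshal to seconds as a JSON number and unmarshal from duration strings or plain numbers. The type below is a local stand-in mirroring pkg/web, and it assumes the quoted-string handling shown in the diff.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// Duration mirrors the pkg/web type for illustration: it marshals to seconds
// and unmarshals from "1m30s"-style strings or numeric seconds.
type Duration time.Duration

func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(float64(d) / float64(time.Second))
}

func (d *Duration) UnmarshalJSON(b []byte) error {
	s := string(b)
	// Strip quotes when the JSON value is a string such as "1m30s".
	if unquoted, err := strconv.Unquote(s); err == nil {
		s = unquoted
	}
	if v, err := time.ParseDuration(s); err == nil {
		*d = Duration(v)
		return nil
	}
	if v, err := strconv.ParseFloat(s, 64); err == nil {
		*d = Duration(v * float64(time.Second))
		return nil
	}
	return fmt.Errorf("unparsable duration format '%s'", s)
}

func main() {
	type cfg struct {
		Timeout Duration `json:"timeout"`
	}

	// Unmarshal accepts a duration string or a number of seconds.
	var c cfg
	_ = json.Unmarshal([]byte(`{"timeout":"1m30s"}`), &c)
	fmt.Println(time.Duration(c.Timeout)) // 1m30s

	// Marshal emits seconds as a JSON number.
	out, _ := json.Marshal(cfg{Timeout: Duration(1500 * time.Millisecond)})
	fmt.Println(string(out)) // {"timeout":1.5}
}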