diff --git a/agent/container/pkg/application/githubmodule.go b/agent/container/pkg/application/githubmodule.go index b2b766b4..bebae4ea 100644 --- a/agent/container/pkg/application/githubmodule.go +++ b/agent/container/pkg/application/githubmodule.go @@ -123,7 +123,7 @@ func (c *GithubApiClient) FetchVersions(packageName string) ([]Version, error) { var versions []Version err = json.Unmarshal(body, &versions) if err != nil { - return nil, fmt.Errorf("error occured while unmarshalling the version %w", err) + return nil, fmt.Errorf("error occurred while unmarshalling the version %w", err) } return versions, nil diff --git a/agent/container/pkg/application/handlers.go b/agent/container/pkg/application/handlers.go index 6d94f33c..c10541d7 100755 --- a/agent/container/pkg/application/handlers.go +++ b/agent/container/pkg/application/handlers.go @@ -12,10 +12,10 @@ import ( "go.opentelemetry.io/otel/attribute" ) -//githubHandler handles the github webhooks post requests. +// localRegistryHandler handles the local registry webhooks post requests. 
func (app *Application) localRegistryHandler(w http.ResponseWriter, r *http.Request) { - ctx:=context.Background() + ctx := context.Background() tracer := otel.Tracer("container-gitlab") _, span := tracer.Start(opentelemetry.BuildContext(ctx), "localRegistryHandler") span.SetAttributes(attribute.String("http.method", "POST")) diff --git a/agent/container/pkg/handler/api_handler.go b/agent/container/pkg/handler/api_handler.go index 6efee78d..63ecb920 100755 --- a/agent/container/pkg/handler/api_handler.go +++ b/agent/container/pkg/handler/api_handler.go @@ -42,7 +42,7 @@ func (ah *APIHandler) BindRequest(r *gin.Engine) { apiGroup := r.Group("/") { - apiGroup.GET("/api-docs", ah.GetApiDocs) + apiGroup.GET("/api-docs", ah.GetAPIDocs) apiGroup.GET("/status", ah.GetStatus) apiGroup.POST("/event/docker/hub", ah.PostEventDockerHub) apiGroup.POST("/event/azure/container", ah.PostEventAzureContainer) @@ -54,7 +54,7 @@ func (ah *APIHandler) BindRequest(r *gin.Engine) { // GetApiDocs serves the Swagger API documentation generated from the OpenAPI YAML file. // It responds with a JSON representation of the API's endpoints, parameters, responses, and other details. // This endpoint can be used by tools like Swagger UI to provide interactive documentation for the API. 
-func (ah *APIHandler) GetApiDocs(c *gin.Context) { +func (ah *APIHandler) GetAPIDocs(c *gin.Context) { swagger, err := api.GetSwagger() if err != nil { c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) diff --git a/agent/kubviz/plugins/events/event_metrics_utils.go b/agent/kubviz/plugins/events/event_metrics_utils.go index 17ef114f..e4078fba 100644 --- a/agent/kubviz/plugins/events/event_metrics_utils.go +++ b/agent/kubviz/plugins/events/event_metrics_utils.go @@ -56,8 +56,8 @@ func publishK8sMetrics(id string, mtype string, mdata *v1.Event, js nats.JetStre ClusterName: ClusterName, ImageName: imageName, } - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.EventSubject, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.EventSubject, metricsJSON) if err != nil { return true, err } diff --git a/agent/kubviz/plugins/ketall/ketall.go b/agent/kubviz/plugins/ketall/ketall.go index 8d91b6ab..49b07307 100644 --- a/agent/kubviz/plugins/ketall/ketall.go +++ b/agent/kubviz/plugins/ketall/ketall.go @@ -25,8 +25,8 @@ var ClusterName string = os.Getenv("CLUSTER_NAME") func PublishAllResources(result model.Resource, js nats.JetStreamContext) error { metrics := result metrics.ClusterName = ClusterName - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.EventSubject_getall_resource, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.EventSubject_getall_resource, metricsJSON) if err != nil { return err } diff --git a/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go b/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go index 4fb00b35..2a390f5c 100644 --- a/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go +++ b/agent/kubviz/plugins/kubepreupgrade/kubePreUpgrade.go @@ -53,13 +53,13 @@ type PreferredResource map[string]ResourceStruct var ( k8sVersion = "master" - deletedApiReplacements = map[string]groupResourceKind{ + 
deletedAPIReplacements = map[string]groupResourceKind{ "extensions/v1beta1/Ingress": {"networking.k8s.io/v1", "ingresses", "Ingress"}, } ) var result *model.Result -func publishK8sDepricated_Deleted_Api(result *model.Result, js nats.JetStreamContext) error { +func publishK8sDepricatedDeletedAPI(result *model.Result, js nats.JetStreamContext) error { for _, deprecatedAPI := range result.DeprecatedAPIs { deprecatedAPI.ClusterName = ClusterName deprecatedAPIJson, _ := json.Marshal(deprecatedAPI) @@ -109,7 +109,7 @@ func KubePreUpgradeDetector(config *rest.Config, js nats.JetStreamContext) error return err } result = getResults(config, kubernetesAPIs) - err = publishK8sDepricated_Deleted_Api(result, js) + err = publishK8sDepricatedDeletedAPI(result, js) return err } @@ -371,7 +371,7 @@ func getResults(configRest *rest.Config, kubeAPIs model.KubernetesAPIs) *model.R gvr, list := getResources(client, groupResourceKind{resourceGroupVersion.GroupVersion, resource.Name, resource.Kind}) - if newApi, ok := deletedApiReplacements[keyAPI]; ok { + if newApi, ok := deletedAPIReplacements[keyAPI]; ok { list.Items = fixDeletedItemsList(client, list.Items, newApi) } diff --git a/agent/kubviz/plugins/kubescore/kube_score.go b/agent/kubviz/plugins/kubescore/kube_score.go index 660aa175..36860693 100644 --- a/agent/kubviz/plugins/kubescore/kube_score.go +++ b/agent/kubviz/plugins/kubescore/kube_score.go @@ -56,9 +56,6 @@ func publish(ns string, js nats.JetStreamContext) error { publishKubescoreMetrics(report, js) //err = publishKubescoreMetrics(uuid.New().String(), ns, out, js) - if err != nil { - return err - } return nil } @@ -75,8 +72,8 @@ func publishKubescoreMetrics(report []json_v2.ScoredObject, js nats.JetStreamCon ClusterName: ClusterName, Report: report, } - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.KUBESCORE_SUBJECT, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.KUBESCORE_SUBJECT, metricsJSON) if err != 
nil { return err } diff --git a/agent/kubviz/plugins/outdated/outdated.go b/agent/kubviz/plugins/outdated/outdated.go index 975c510f..32dcd799 100644 --- a/agent/kubviz/plugins/outdated/outdated.go +++ b/agent/kubviz/plugins/outdated/outdated.go @@ -70,8 +70,8 @@ func PublishOutdatedImages(out model.CheckResultfinal, js nats.JetStreamContext) metrics := out metrics.ClusterName = ClusterName - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.EventSubject_outdated_images, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.EventSubject_outdated_images, metricsJSON) if err != nil { return err } diff --git a/agent/kubviz/plugins/rakkess/rakees_agent.go b/agent/kubviz/plugins/rakkess/rakees_agent.go index 93414db3..c6a7a76b 100644 --- a/agent/kubviz/plugins/rakkess/rakees_agent.go +++ b/agent/kubviz/plugins/rakkess/rakees_agent.go @@ -95,8 +95,8 @@ func RakeesOutput(config *rest.Config, js nats.JetStreamContext) error { List: HumanreadableAccessCode(listOutcome), Update: HumanreadableAccessCode(updateOutcome), } - metricsJson, _ := json.Marshal(metrics) - _, err = js.Publish(constants.EventSubject_rakees, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err = js.Publish(constants.EventSubject_rakees, metricsJSON) if err != nil { return err } diff --git a/agent/kubviz/plugins/trivy/trivy.go b/agent/kubviz/plugins/trivy/trivy.go index 625b405a..a354f13e 100644 --- a/agent/kubviz/plugins/trivy/trivy.go +++ b/agent/kubviz/plugins/trivy/trivy.go @@ -100,8 +100,8 @@ func PublishTrivyK8sReport(report report.ConsolidatedReport, js nats.JetStreamCo ClusterName: ClusterName, Report: report, } - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.TRIVY_K8S_SUBJECT, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.TRIVY_K8S_SUBJECT, metricsJSON) if err != nil { return err } diff --git a/agent/kubviz/plugins/trivy/trivy_image.go 
b/agent/kubviz/plugins/trivy/trivy_image.go index c4919a18..1cca296d 100644 --- a/agent/kubviz/plugins/trivy/trivy_image.go +++ b/agent/kubviz/plugins/trivy/trivy_image.go @@ -42,7 +42,7 @@ func RunTrivyImageScans(config *rest.Config, js nats.JetStreamContext) error { images, err := ListImages(config) if err != nil { - log.Println("error occured while trying to list images, error :", err.Error()) + log.Println("error occurred while trying to list images, error :", err.Error()) return err } @@ -90,8 +90,8 @@ func PublishImageScanReports(report types.Report, js nats.JetStreamContext) erro ClusterName: ClusterName, Report: report, } - metricsJson, _ := json.Marshal(metrics) - _, err := js.Publish(constants.TRIVY_IMAGE_SUBJECT, metricsJson) + metricsJSON, _ := json.Marshal(metrics) + _, err := js.Publish(constants.TRIVY_IMAGE_SUBJECT, metricsJSON) if err != nil { return err } diff --git a/agent/kubviz/plugins/trivy/trivy_sbom.go b/agent/kubviz/plugins/trivy/trivy_sbom.go index 62027761..170d8a65 100644 --- a/agent/kubviz/plugins/trivy/trivy_sbom.go +++ b/agent/kubviz/plugins/trivy/trivy_sbom.go @@ -29,12 +29,12 @@ func PublishTrivySbomReport(report map[string]interface{}, js nats.JetStreamCont ClusterName: ClusterName, Report: report, } - metricsJson, err := json.Marshal(metrics) + metricsJSON, err := json.Marshal(metrics) if err != nil { log.Println("error occurred while marshalling sbom metrics in agent", err.Error()) return err } - _, err = js.Publish(constants.TRIVY_SBOM_SUBJECT, metricsJson) + _, err = js.Publish(constants.TRIVY_SBOM_SUBJECT, metricsJSON) if err != nil { return err } diff --git a/client/pkg/clickhouse/db_client.go b/client/pkg/clickhouse/db_client.go index 065631f6..9e265b95 100644 --- a/client/pkg/clickhouse/db_client.go +++ b/client/pkg/clickhouse/db_client.go @@ -60,7 +60,7 @@ func NewDBClient(conf *config.Config) (DBInterface, *sql.DB, error) { if conf.ClickHouseUsername != "" && conf.ClickHousePassword != "" { fmt.Println("Using provided 
username and password") connOptions = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DBPort)}, Debug: true, Auth: clickhouse.Auth{ Username: conf.ClickHouseUsername, @@ -77,7 +77,7 @@ func NewDBClient(conf *config.Config) (DBInterface, *sql.DB, error) { } else { fmt.Println("Using connection without username and password") connOptions = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DBPort)}, Debug: true, Debugf: func(format string, v ...interface{}) { fmt.Printf(format, v...) @@ -109,7 +109,7 @@ func NewDBClient(conf *config.Config) (DBInterface, *sql.DB, error) { if conf.ClickHouseUsername != "" && conf.ClickHousePassword != "" { fmt.Println("Using provided username and password") connOption = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DBPort)}, Debug: true, Auth: clickhouse.Auth{ Username: conf.ClickHouseUsername, @@ -119,7 +119,7 @@ func NewDBClient(conf *config.Config) (DBInterface, *sql.DB, error) { } else { fmt.Println("Using connection without username and password") connOption = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", conf.DBAddress, conf.DBPort)}, } } @@ -583,7 +583,7 @@ func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) { } defer stmt.Close() - eventJson, _ := json.Marshal(metrics.Event) + eventJSON, _ := json.Marshal(metrics.Event) formattedFirstTimestamp := metrics.Event.FirstTimestamp.Time.UTC().Format("2006-01-02 15:04:05") formattedLastTimestamp := metrics.Event.LastTimestamp.Time.UTC().Format("2006-01-02 15:04:05") @@ -598,7 +598,7 @@ func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) { metrics.Event.Message, metrics.Event.Reason, 
metrics.Event.Source.Host, - string(eventJson), + string(eventJSON), metrics.ImageName, formattedFirstTimestamp, formattedLastTimestamp, @@ -936,7 +936,7 @@ func (c *DBClient) Close() { } func DbUrl(conf *config.Config) string { - return fmt.Sprintf("tcp://%s:%d?debug=true", conf.DBAddress, conf.DbPort) + return fmt.Sprintf("tcp://%s:%d?debug=true", conf.DBAddress, conf.DBPort) } func (c *DBClient) RetriveKetallEvent() ([]model.Resource, error) { rows, err := c.conn.Query("SELECT ClusterName, Namespace, Kind, Resource, Age FROM getall_resources") @@ -1018,8 +1018,8 @@ func (c *DBClient) RetrieveKubvizEvent() ([]model.DbEvent, error) { log.Printf("Error: %s", err) return nil, err } - eventJson, _ := json.Marshal(dbEvent) - log.Printf("DB Event: %s", string(eventJson)) + eventJSON, _ := json.Marshal(dbEvent) + log.Printf("DB Event: %s", string(eventJSON)) events = append(events, dbEvent) } diff --git a/client/pkg/clickhouse/statements.go b/client/pkg/clickhouse/statements.go index 22406fbe..3761eb33 100644 --- a/client/pkg/clickhouse/statements.go +++ b/client/pkg/clickhouse/statements.go @@ -2,219 +2,219 @@ package clickhouse type DBStatement string -const kubvizTable DBStatement = ` - CREATE TABLE IF NOT EXISTS events ( - ClusterName String, - Id String, - EventTime DateTime('UTC'), - OpType String, - Name String, - Namespace String, - Kind String, - Message String, - Reason String, - Host String, - Event String, - FirstTime String, - LastTime String - ) engine=File(TabSeparated) -` -const rakeesTable DBStatement = ` -CREATE TABLE IF NOT EXISTS rakkess ( - ClusterName String, - Name String, - Create String, - Delete String, - List String, - Update String, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const kubvizTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS events ( +// ClusterName String, +// Id String, +// EventTime DateTime('UTC'), +// OpType String, +// Name String, +// Namespace String, +// Kind String, +// Message String, +// Reason 
String, +// Host String, +// Event String, +// FirstTime String, +// LastTime String +// ) engine=File(TabSeparated) +// ` +// const rakeesTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS rakkess ( +// ClusterName String, +// Name String, +// Create String, +// Delete String, +// List String, +// Update String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const kubePugDepricatedTable DBStatement = ` -CREATE TABLE IF NOT EXISTS DeprecatedAPIs ( - ClusterName String, - ObjectName String, - Description String, - Kind String, - Deprecated UInt8, - Scope String, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const kubePugDepricatedTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS DeprecatedAPIs ( +// ClusterName String, +// ObjectName String, +// Description String, +// Kind String, +// Deprecated UInt8, +// Scope String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const kubepugDeletedTable DBStatement = ` -CREATE TABLE IF NOT EXISTS DeletedAPIs ( - ClusterName String, - ObjectName String, - Group String, - Kind String, - Version String, - Name String, - Deleted UInt8, - Scope String, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const kubepugDeletedTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS DeletedAPIs ( +// ClusterName String, +// ObjectName String, +// Group String, +// Kind String, +// Version String, +// Name String, +// Deleted UInt8, +// Scope String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const jfrogContainerPushEventTable DBStatement = ` -CREATE TABLE IF NOT EXISTS jfrogcontainerpush ( - Domain String, - EventType String, - RegistryURL String, - RepositoryName String, - SHAID String, - Size Int32, - ImageName String, - Tag String, - Event String, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const jfrogContainerPushEventTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS jfrogcontainerpush ( +// Domain String, +// 
EventType String, +// RegistryURL String, +// RepositoryName String, +// SHAID String, +// Size Int32, +// ImageName String, +// Tag String, +// Event String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const ketallTable DBStatement = ` - CREATE TABLE IF NOT EXISTS getall_resources ( - ClusterName String, - Namespace String, - Kind String, - Resource String, - Age String, - EventTime DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const ketallTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS getall_resources ( +// ClusterName String, +// Namespace String, +// Kind String, +// Resource String, +// Age String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const outdateTable DBStatement = ` -CREATE TABLE IF NOT EXISTS outdated_images ( - ClusterName String, - Namespace String, - Pod String, - CurrentImage String, - CurrentTag String, - LatestVersion String, - VersionsBehind Int64, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const outdateTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS outdated_images ( +// ClusterName String, +// Namespace String, +// Pod String, +// CurrentImage String, +// CurrentTag String, +// LatestVersion String, +// VersionsBehind Int64, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const kubescoreTable DBStatement = ` -CREATE TABLE IF NOT EXISTS kubescore ( - id UUID, - namespace String, - cluster_name String, - recommendations String, - EventTime DateTime('UTC') -) engine=File(TabSeparated) -` +// const kubescoreTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS kubescore ( +// id UUID, +// namespace String, +// cluster_name String, +// recommendations String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const trivyTableVul DBStatement = ` - CREATE TABLE IF NOT EXISTS trivy_vul ( - id UUID, - cluster_name String, - namespace String, - kind String, - name String, - vul_id String, - vul_vendor_ids String, - 
vul_pkg_id String, - vul_pkg_name String, - vul_pkg_path String, - vul_installed_version String, - vul_fixed_version String, - vul_title String, - vul_severity String, - vul_published_date DateTime('UTC'), - vul_last_modified_date DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const trivyTableVul DBStatement = ` +// CREATE TABLE IF NOT EXISTS trivy_vul ( +// id UUID, +// cluster_name String, +// namespace String, +// kind String, +// name String, +// vul_id String, +// vul_vendor_ids String, +// vul_pkg_id String, +// vul_pkg_name String, +// vul_pkg_path String, +// vul_installed_version String, +// vul_fixed_version String, +// vul_title String, +// vul_severity String, +// vul_published_date DateTime('UTC'), +// vul_last_modified_date DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const trivyTableMisconfig DBStatement = ` - CREATE TABLE IF NOT EXISTS trivy_misconfig ( - id UUID, - cluster_name String, - namespace String, - kind String, - name String, - misconfig_id String, - misconfig_avdid String, - misconfig_type String, - misconfig_title String, - misconfig_desc String, - misconfig_msg String, - misconfig_query String, - misconfig_resolution String, - misconfig_severity String, - misconfig_status String, - EventTime DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const trivyTableMisconfig DBStatement = ` +// CREATE TABLE IF NOT EXISTS trivy_misconfig ( +// id UUID, +// cluster_name String, +// namespace String, +// kind String, +// name String, +// misconfig_id String, +// misconfig_avdid String, +// misconfig_type String, +// misconfig_title String, +// misconfig_desc String, +// misconfig_msg String, +// misconfig_query String, +// misconfig_resolution String, +// misconfig_severity String, +// misconfig_status String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const trivyTableImage DBStatement = ` - CREATE TABLE IF NOT EXISTS trivyimage ( - id UUID, - cluster_name String, - artifact_name String, - vul_id 
String, - vul_pkg_id String, - vul_pkg_name String, - vul_installed_version String, - vul_fixed_version String, - vul_title String, - vul_severity String, - vul_published_date DateTime('UTC'), - vul_last_modified_date DateTime('UTC') - ) engine=File(TabSeparated) - ` -const dockerHubBuildTable DBStatement = ` - CREATE TABLE IF NOT EXISTS dockerhubbuild ( - PushedBy String, - ImageTag String, - RepositoryName String, - DateCreated String, - Owner String, - Event String, - EventTime DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const trivyTableImage DBStatement = ` +// CREATE TABLE IF NOT EXISTS trivyimage ( +// id UUID, +// cluster_name String, +// artifact_name String, +// vul_id String, +// vul_pkg_id String, +// vul_pkg_name String, +// vul_installed_version String, +// vul_fixed_version String, +// vul_title String, +// vul_severity String, +// vul_published_date DateTime('UTC'), +// vul_last_modified_date DateTime('UTC') +// ) engine=File(TabSeparated) +// ` +// const dockerHubBuildTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS dockerhubbuild ( +// PushedBy String, +// ImageTag String, +// RepositoryName String, +// DateCreated String, +// Owner String, +// Event String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const azureContainerPushEventTable DBStatement = ` - CREATE TABLE IF NOT EXISTS azurecontainerpush ( - RegistryURL String, - RepositoryName String, - Tag String, - ImageName String, - Event String, - Size Int32, - SHAID String, - EventTime DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const azureContainerPushEventTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS azurecontainerpush ( +// RegistryURL String, +// RepositoryName String, +// Tag String, +// ImageName String, +// Event String, +// Size Int32, +// SHAID String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const quayContainerPushEventTable DBStatement = ` - CREATE TABLE IF NOT EXISTS quaycontainerpush ( - name String, - 
repository String, - nameSpace String, - dockerURL String, - homePage String, - tag String, - Event String, - EventTime DateTime('UTC') - ) engine=File(TabSeparated) - ` +// const quayContainerPushEventTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS quaycontainerpush ( +// name String, +// repository String, +// nameSpace String, +// dockerURL String, +// homePage String, +// tag String, +// Event String, +// EventTime DateTime('UTC') +// ) engine=File(TabSeparated) +// ` -const trivySbomTable DBStatement = ` - CREATE TABLE IF NOT EXISTS trivysbom ( - id UUID, - cluster_name String, - image_name String, - package_name String, - package_url String, - bom_ref String, - serial_number String, - version INTEGER, - bom_format String - ) engine=File(TabSeparated) - ` +// const trivySbomTable DBStatement = ` +// CREATE TABLE IF NOT EXISTS trivysbom ( +// id UUID, +// cluster_name String, +// image_name String, +// package_name String, +// package_url String, +// bom_ref String, +// serial_number String, +// version INTEGER, +// bom_format String +// ) engine=File(TabSeparated) +// ` const InsertDockerHubBuild DBStatement = "INSERT INTO dockerhubbuild (PushedBy, ImageTag, RepositoryName, DateCreated, Owner, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?)" const InsertRakees DBStatement = "INSERT INTO rakkess (ClusterName, Name, Create, Delete, List, Update, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?)" @@ -233,4 +233,4 @@ const InsertAzureContainerPushEvent DBStatement = "INSERT INTO azurecontainerpus const InsertTrivySbom string = "INSERT INTO trivysbom (id, cluster_name, bom_format, serial_number, bom_ref, image_name, component_type, package_url, event_time, other_component_name) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" const InsertQuayContainerPushEvent DBStatement = "INSERT INTO quaycontainerpush (name, repository, nameSpace, dockerURL, homePage, tag, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?)" const InsertJfrogContainerPushEvent DBStatement = "INSERT INTO 
jfrogcontainerpush (Domain, EventType, RegistryURL, RepositoryName, SHAID, Size, ImageName, Tag, Event, EventTime) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)" -const InsertKuberhealthy string = "INSERT INTO kuberhealthy (CurrentUUID, CheckName, OK, Errors, RunDuration, Namespace, Node, LastRun, AuthoritativePod) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" \ No newline at end of file +const InsertKuberhealthy string = "INSERT INTO kuberhealthy (CurrentUUID, CheckName, OK, Errors, RunDuration, Namespace, Node, LastRun, AuthoritativePod) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)" diff --git a/client/pkg/clients/bridge_client.go b/client/pkg/clients/bridge_client.go index 9b721805..3f6e8021 100644 --- a/client/pkg/clients/bridge_client.go +++ b/client/pkg/clients/bridge_client.go @@ -41,7 +41,7 @@ const ( func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { log.Printf("Creating nats consumer %s with subject: %s \n", bridgeConsumer, bridgeSubject) - ctx:=context.Background() + ctx := context.Background() tracer := otel.Tracer("git-client") _, span := tracer.Start(opentelemetry.BuildContext(ctx), "SubscribeGitBridgeNats") span.SetAttributes(attribute.String("git-subscribe", "Subscribe")) @@ -82,7 +82,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { var pl azuremodel.GitPushEvent err := json.Unmarshal([]byte(msg.Data), &pl) if err != nil { - log.Println("error occured while unmarshal the payload Error:", err.Error()) + log.Println("error occurred while unmarshal the payload Error:", err.Error()) return } var gca model.GitCommonAttribute @@ -102,7 +102,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Resource.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + err = conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + if err != nil { + log.Println("error occurred while connecting the payload 
Error:") + + } log.Println("Inserted AzureDevops metrics:", string(msg.Data)) log.Println() case string(azuremodel.GitPullRequestMergedEventType): @@ -121,7 +125,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Resource.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + err = conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted AzureDevops metrics:", string(msg.Data)) log.Println() default: @@ -134,7 +142,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.EventType = event gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + err := conn.InsertGitCommon(gca, dbstatement.InsertAzureDevops) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted GitHub metrics:", string(msg.Data)) log.Println() } @@ -144,7 +156,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { var pl github.PushPayload err := json.Unmarshal([]byte(msg.Data), &pl) if err != nil { - log.Println("error occured while unmarshal the payload Error:", err.Error()) + log.Println("error occurred while unmarshal the payload Error:", err.Error()) return } var gca model.GitCommonAttribute @@ -168,7 +180,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGithub) + err = conn.InsertGitCommon(gca, dbstatement.InsertGithub) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted GitHub metrics:", string(msg.Data)) log.Println() case 
string(github.PullRequestEvent): @@ -192,7 +208,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGithub) + err := conn.InsertGitCommon(gca, dbstatement.InsertGithub) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted GitHub metrics:", string(msg.Data)) log.Println() } @@ -206,7 +226,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.EventType = event gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGithub) + err := conn.InsertGitCommon(gca, dbstatement.InsertGithub) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted GitHub metrics:", string(msg.Data)) log.Println() } @@ -216,7 +240,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { var pl gitea.PushPayload err := json.Unmarshal([]byte(msg.Data), &pl) if err != nil { - log.Println("error occured while unmarshal the payload Error:", err.Error()) + log.Println("error occurred while unmarshal the payload Error:", err.Error()) return } var gca model.GitCommonAttribute @@ -232,7 +256,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Repo.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitea) + err = conn.InsertGitCommon(gca, dbstatement.InsertGitea) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted Gitea metrics:", string(msg.Data)) log.Println() case string(gitea.PullRequestEvent): @@ -266,7 +294,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { } gca.TimeStamp = time.Now().UTC() gca.Event = 
string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitea) + err := conn.InsertGitCommon(gca, dbstatement.InsertGitea) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted Gitea metrics:", string(msg.Data)) log.Println() } @@ -279,7 +311,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.TimeStamp = time.Now().UTC() gca.RepoName = "" gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitea) + err := conn.InsertGitCommon(gca, dbstatement.InsertGitea) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted Gitea metrics:", string(msg.Data)) log.Println() } @@ -289,7 +325,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { var pl gitlab.PushEventPayload err := json.Unmarshal([]byte(msg.Data), &pl) if err != nil { - log.Println("error occured while unmarshal the payload Error:", err.Error()) + log.Println("error occurred while unmarshal the payload Error:", err.Error()) return } var gca model.GitCommonAttribute @@ -305,7 +341,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Project.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + err = conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted Gitlab metrics:", string(msg.Data)) log.Println() case string(gitlab.MergeRequestEvents): @@ -325,7 +365,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Project.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + err = conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + if err != nil { + log.Println("error occurred 
while connecting the payload Error:") + + } log.Println("Inserted Gitlab metrics:", string(msg.Data)) log.Println() } @@ -339,7 +383,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.TimeStamp = time.Now().UTC() gca.RepoName = "" gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + err := conn.InsertGitCommon(gca, dbstatement.InsertGitlab) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted Gitlab metrics:", string(msg.Data)) log.Println() } @@ -349,7 +397,7 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { var pl bitbucket.RepoPushPayload err := json.Unmarshal([]byte(msg.Data), &pl) if err != nil { - log.Println("error occured while unmarshal the payload Error:", err.Error()) + log.Println("error occurred while unmarshal the payload Error:", err.Error()) return } var gca model.GitCommonAttribute @@ -366,7 +414,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + err = conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted BitBucket metrics:", string(msg.Data)) log.Println() case string(bitbucket.PullRequestMergedEvent): @@ -385,7 +437,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.RepoName = pl.Repository.Name gca.TimeStamp = time.Now().UTC() gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + err = conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted BitBucket metrics:", string(msg.Data)) log.Println() default: @@ -398,7 
+454,11 @@ func (n *NATSContext) SubscribeGitBridgeNats(conn clickhouse.DBInterface) { gca.TimeStamp = time.Now().UTC() gca.RepoName = "" gca.Event = string(msg.Data) - conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + err := conn.InsertGitCommon(gca, dbstatement.InsertBitbucket) + if err != nil { + log.Println("error occurred while connecting the payload Error:") + + } log.Println("Inserted BitBucket metrics:", string(msg.Data)) log.Println() } diff --git a/client/pkg/clients/container_client.go b/client/pkg/clients/container_client.go index ff571614..0d93a541 100644 --- a/client/pkg/clients/container_client.go +++ b/client/pkg/clients/container_client.go @@ -31,7 +31,7 @@ const ( func (n *NATSContext) SubscribeContainerNats(conn clickhouse.DBInterface) { - ctx:=context.Background() + ctx := context.Background() tracer := otel.Tracer("container-client") _, span := tracer.Start(opentelemetry.BuildContext(ctx), "SubscribeContainerNats") span.SetAttributes(attribute.String("container-subscribe", "Subscribe")) diff --git a/client/pkg/config/config.go b/client/pkg/config/config.go index 94045005..13668e8b 100644 --- a/client/pkg/config/config.go +++ b/client/pkg/config/config.go @@ -3,7 +3,7 @@ package config type Config struct { NatsAddress string `envconfig:"NATS_ADDRESS"` NatsToken string `envconfig:"NATS_TOKEN"` - DbPort int `envconfig:"DB_PORT"` + DBPort int `envconfig:"DB_PORT"` DBAddress string `envconfig:"DB_ADDRESS"` ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"` ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"` @@ -27,7 +27,7 @@ type Config struct { } type GraphQlConfig struct { - DbPort int `envconfig:"DB_PORT"` + DBPort int `envconfig:"DB_PORT"` DBAddress string `envconfig:"DB_ADDRESS"` ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"` ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"` diff --git a/cmd/cli/config/config.go b/cmd/cli/config/config.go index 7862f1c3..536cebed 100644 --- 
a/cmd/cli/config/config.go +++ b/cmd/cli/config/config.go @@ -5,7 +5,7 @@ import ( ) type Config struct { - DbPort int `envconfig:"DB_PORT" required:"true"` + DBPort int `envconfig:"DB_PORT" required:"true"` DBAddress string `envconfig:"DB_ADDRESS" required:"true"` ClickHouseUsername string `envconfig:"CLICKHOUSE_USERNAME"` ClickHousePassword string `envconfig:"CLICKHOUSE_PASSWORD"` diff --git a/cmd/cli/config/utils.go b/cmd/cli/config/utils.go index 360fbadc..92602b57 100644 --- a/cmd/cli/config/utils.go +++ b/cmd/cli/config/utils.go @@ -22,7 +22,7 @@ func (cfg *Config) openClickHouseConn() (*sql.DB, error) { if cfg.ClickHouseUsername != "" && cfg.ClickHousePassword != "" { fmt.Println("Using provided username and password") options = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DBPort)}, Debug: true, Auth: clickhouse.Auth{ Username: cfg.ClickHouseUsername, @@ -33,7 +33,7 @@ func (cfg *Config) openClickHouseConn() (*sql.DB, error) { } else { fmt.Println("Using connection without username and password") options = clickhouse.Options{ - Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DbPort)}, + Addr: []string{fmt.Sprintf("%s:%d", cfg.DBAddress, cfg.DBPort)}, } } @@ -100,7 +100,7 @@ func (cfg *Config) Migrate() error { conn, err := cfg.openClickHouseConn() if err != nil { - return fmt.Errorf("unable to create a clickhouse conection %w", err) + return fmt.Errorf("unable to create a clickhouse connection %w", err) } driver, err := ch.WithInstance(conn, &ch.Config{}) diff --git a/graphqlserver/graph/utils.go b/graphqlserver/graph/utils.go index dd7bb02a..72f406d4 100644 --- a/graphqlserver/graph/utils.go +++ b/graphqlserver/graph/utils.go @@ -2,10 +2,7 @@ package graph import ( "context" - "database/sql" "fmt" - - "github.com/intelops/kubviz/graphqlserver/graph/model" ) func (r *Resolver) fetchClustersFromDatabase(ctx context.Context) ([]string, error) { @@ -64,95 
+61,96 @@ func (r *Resolver) fetchNamespacesFromDatabase(ctx context.Context, clusterName return namespaces, nil } -func (r *Resolver) fetchOutdatedImages(ctx context.Context, namespace string) ([]*model.OutdatedImage, error) { - if r.DB == nil { - return nil, fmt.Errorf("database connection is not initialized") - } - query := `SELECT ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind, EventTime FROM outdated_images WHERE Namespace = ?` - - rows, err := r.DB.QueryContext(ctx, query, namespace) - if err != nil { - if err == sql.ErrNoRows { - return []*model.OutdatedImage{}, nil - } - return nil, fmt.Errorf("error executing query: %v", err) - } - defer rows.Close() - - var outdatedImages []*model.OutdatedImage - for rows.Next() { - var oi model.OutdatedImage - if err := rows.Scan(&oi.ClusterName, &oi.Namespace, &oi.Pod, &oi.CurrentImage, &oi.CurrentTag, &oi.LatestVersion, &oi.VersionsBehind, &oi.EventTime); err != nil { - return nil, fmt.Errorf("error scanning row: %v", err) - } - outdatedImages = append(outdatedImages, &oi) - } - - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("error iterating rows: %v", err) - } - - return outdatedImages, nil -} -func (r *Resolver) fetchKubeScores(ctx context.Context, namespace string) ([]*model.KubeScore, error) { - if r.DB == nil { - return nil, fmt.Errorf("database connection is not initialized") - } - - query := `SELECT id, clustername, object_name, kind, apiVersion, name, namespace, target_type, description, path, summary, file_name, file_row, EventTime FROM kubescore WHERE namespace = ?` - rows, err := r.DB.QueryContext(ctx, query, namespace) - if err != nil { - if err == sql.ErrNoRows { - // No data for the namespace, return an empty slice - return []*model.KubeScore{}, nil - } - return nil, fmt.Errorf("error executing query: %v", err) - } - defer rows.Close() - - var kubeScores []*model.KubeScore - for rows.Next() { - var ks model.KubeScore - if err := rows.Scan(&ks.ID, 
&ks.ClusterName, &ks.ObjectName, &ks.Kind, &ks.APIVersion, &ks.Name, &ks.Namespace, &ks.TargetType, &ks.Description, &ks.Path, &ks.Summary, &ks.FileName, &ks.FileRow, &ks.EventTime); err != nil { - return nil, fmt.Errorf("error scanning row: %v", err) - } - kubeScores = append(kubeScores, &ks) - } - - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("error iterating rows: %v", err) - } - - return kubeScores, nil -} -func (r *Resolver) fetchResources(ctx context.Context, namespace string) ([]*model.Resource, error) { - if r.DB == nil { - return nil, fmt.Errorf("database connection is not initialized") - } - - query := `SELECT ClusterName, Namespace, Kind, Resource, Age, EventTime FROM getall_resources WHERE Namespace = ?` - rows, err := r.DB.QueryContext(ctx, query, namespace) - if err != nil { - if err == sql.ErrNoRows { - // No data for the namespace, return an empty slice - return []*model.Resource{}, nil - } - return nil, fmt.Errorf("error executing query: %v", err) - } - defer rows.Close() - var resources []*model.Resource - for rows.Next() { - var res model.Resource - if err := rows.Scan(&res.ClusterName, &res.Namespace, &res.Kind, &res.Resource, &res.Age, &res.EventTime); err != nil { - return nil, fmt.Errorf("error scanning row: %v", err) - } - resources = append(resources, &res) - } - - if err := rows.Err(); err != nil { - return nil, fmt.Errorf("error iterating rows: %v", err) - } - - return resources, nil -} +// func (r *Resolver) fetchOutdatedImages(ctx context.Context, namespace string) ([]*model.OutdatedImage, error) { +// if r.DB == nil { +// return nil, fmt.Errorf("database connection is not initialized") +// } +// query := `SELECT ClusterName, Namespace, Pod, CurrentImage, CurrentTag, LatestVersion, VersionsBehind, EventTime FROM outdated_images WHERE Namespace = ?` + +// rows, err := r.DB.QueryContext(ctx, query, namespace) +// if err != nil { +// if err == sql.ErrNoRows { +// return []*model.OutdatedImage{}, nil +// } +// return nil, 
fmt.Errorf("error executing query: %v", err) +// } +// defer rows.Close() + +// var outdatedImages []*model.OutdatedImage +// for rows.Next() { +// var oi model.OutdatedImage +// if err := rows.Scan(&oi.ClusterName, &oi.Namespace, &oi.Pod, &oi.CurrentImage, &oi.CurrentTag, &oi.LatestVersion, &oi.VersionsBehind, &oi.EventTime); err != nil { +// return nil, fmt.Errorf("error scanning row: %v", err) +// } +// outdatedImages = append(outdatedImages, &oi) +// } + +// if err := rows.Err(); err != nil { +// return nil, fmt.Errorf("error iterating rows: %v", err) +// } + +// return outdatedImages, nil +// } +// func (r *Resolver) fetchKubeScores(ctx context.Context, namespace string) ([]*model.KubeScore, error) { +// if r.DB == nil { +// return nil, fmt.Errorf("database connection is not initialized") +// } + +// query := `SELECT id, clustername, object_name, kind, apiVersion, name, namespace, target_type, description, path, summary, file_name, file_row, EventTime FROM kubescore WHERE namespace = ?` +// rows, err := r.DB.QueryContext(ctx, query, namespace) +// if err != nil { +// if err == sql.ErrNoRows { +// // No data for the namespace, return an empty slice +// return []*model.KubeScore{}, nil +// } +// return nil, fmt.Errorf("error executing query: %v", err) +// } +// defer rows.Close() + +// var kubeScores []*model.KubeScore +// for rows.Next() { +// var ks model.KubeScore +// if err := rows.Scan(&ks.ID, &ks.ClusterName, &ks.ObjectName, &ks.Kind, &ks.APIVersion, &ks.Name, &ks.Namespace, &ks.TargetType, &ks.Description, &ks.Path, &ks.Summary, &ks.FileName, &ks.FileRow, &ks.EventTime); err != nil { +// return nil, fmt.Errorf("error scanning row: %v", err) +// } +// kubeScores = append(kubeScores, &ks) +// } + +// if err := rows.Err(); err != nil { +// return nil, fmt.Errorf("error iterating rows: %v", err) +// } + +// return kubeScores, nil +// } +// func (r *Resolver) fetchResources(ctx context.Context, namespace string) ([]*model.Resource, error) { +// if r.DB == nil 
{ +// return nil, fmt.Errorf("database connection is not initialized") +// } + +// query := `SELECT ClusterName, Namespace, Kind, Resource, Age, EventTime FROM getall_resources WHERE Namespace = ?` +// rows, err := r.DB.QueryContext(ctx, query, namespace) +// if err != nil { +// if err == sql.ErrNoRows { +// // No data for the namespace, return an empty slice +// return []*model.Resource{}, nil +// } +// return nil, fmt.Errorf("error executing query: %v", err) +// } +// defer rows.Close() + +// var resources []*model.Resource +// for rows.Next() { +// var res model.Resource +// if err := rows.Scan(&res.ClusterName, &res.Namespace, &res.Kind, &res.Resource, &res.Age, &res.EventTime); err != nil { +// return nil, fmt.Errorf("error scanning row: %v", err) +// } +// resources = append(resources, &res) +// } + +// if err := rows.Err(); err != nil { +// return nil, fmt.Errorf("error iterating rows: %v", err) +// } + +// return resources, nil +// } diff --git a/graphqlserver/server.go b/graphqlserver/server.go index 8e191aff..7e540545 100644 --- a/graphqlserver/server.go +++ b/graphqlserver/server.go @@ -50,7 +50,7 @@ func initializeDatabase(cfg *config.GraphQlConfig) (*sql.DB, error) { var db *sql.DB var err error var config = &config.Config{ - DbPort: cfg.DbPort, + DBPort: cfg.DBPort, DBAddress: cfg.DBAddress, ClickHouseUsername: cfg.ClickHouseUsername, ClickHousePassword: cfg.ClickHousePassword, diff --git a/pkg/mtlsnats/mtlsnats.go b/pkg/mtlsnats/mtlsnats.go index bc82cc94..a68ca40a 100644 --- a/pkg/mtlsnats/mtlsnats.go +++ b/pkg/mtlsnats/mtlsnats.go @@ -43,7 +43,7 @@ func ReadMtlsCerts(certificateFilePath, keyFilePath, CAFilePath string) (certPEM func OpenMtlsCertFile(filepath string) (f *os.File, err error) { f, err = os.Open(filepath) if err != nil { - return nil, fmt.Errorf("Failed to open mtls certificate file: %w", err) + return nil, fmt.Errorf("failed to open mtls certificate file: %w", err) } return f, nil } @@ -58,7 +58,7 @@ func ReadMtlsFileContents(filePath 
string) ([]byte, error) { contents, err := io.ReadAll(file) if err != nil { - return nil, fmt.Errorf("Error while reading file %s:%w", filePath, err) + return nil, fmt.Errorf("error while reading file %s:%w", filePath, err) } return contents, nil @@ -70,19 +70,19 @@ func GetTlsConfig() (*tls.Config, error) { err := envconfig.Process("", &cfg) if err != nil { - return nil, fmt.Errorf("Unable to read mtls config %w", err) + return nil, fmt.Errorf("unable to read mtls config %w", err) } certPEM, keyPEM, CACertPEM, err := ReadMtlsCerts(cfg.CertificateFilePath, cfg.KeyFilePath, cfg.CAFilePath) if err != nil { - return nil, fmt.Errorf("Unable to read mtls certificates %w", err) + return nil, fmt.Errorf("unable to read mtls certificates %w", err) } cert, err := tls.X509KeyPair(certPEM, keyPEM) if err != nil { - return nil, fmt.Errorf("Error loading X509 key pair from PEM: %w", err) + return nil, fmt.Errorf("error loading X509 key pair from PEM: %w", err) } caCertPool := x509.NewCertPool()