chore(controller): simplify sharding code (#21244)
Signed-off-by: Michael Crenshaw <[email protected]>
crenshaw-dev authored Dec 18, 2024
1 parent 34fd729 commit ab07b0a
Showing 2 changed files with 19 additions and 37 deletions.
20 changes: 0 additions & 20 deletions controller/sharding/cache.go
@@ -19,7 +19,6 @@ type ClusterShardingCache interface {
 	UpdateApp(a *v1alpha1.Application)
 	IsManagedCluster(c *v1alpha1.Cluster) bool
 	GetDistribution() map[string]int
-	GetAppDistribution() map[string]int
 }
 
 type ClusterSharding struct {
@@ -244,22 +243,3 @@ func (sharding *ClusterSharding) UpdateApp(a *v1alpha1.Application) {
 		log.Debugf("Skipping sharding distribution update. No relevant changes")
 	}
 }
-
-// GetAppDistribution should be not be called from a DestributionFunction because
-// it could cause a deadlock when updateDistribution is called.
-func (sharding *ClusterSharding) GetAppDistribution() map[string]int {
-	sharding.lock.RLock()
-	clusters := sharding.Clusters
-	apps := sharding.Apps
-	sharding.lock.RUnlock()
-
-	appDistribution := make(map[string]int, len(clusters))
-
-	for _, a := range apps {
-		if _, ok := appDistribution[a.Spec.Destination.Server]; !ok {
-			appDistribution[a.Spec.Destination.Server] = 0
-		}
-		appDistribution[a.Spec.Destination.Server]++
-	}
-	return appDistribution
-}
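Note: the updated test below calls a standalone getAppDistribution helper in place of the removed method. The following is only a sketch of what such a helper could look like; the signature, parameter types, and int64 counts are inferred from the test's getClusters/getApps closures and assertions, not copied from the commit, and the package and import path are assumptions as well.

// Minimal sketch, assuming package sharding and the import
// v1alpha1 "github.com/argoproj/argo-cd/v2/pkg/apis/application/v1alpha1".
// Signature and return type are inferred from the updated test, not taken
// verbatim from the commit.
func getAppDistribution(getClusters func() []*v1alpha1.Cluster, getApps func() []*v1alpha1.Application) map[string]int64 {
	apps := getApps()
	appDistribution := make(map[string]int64, len(getClusters()))

	for _, a := range apps {
		// A missing key reads as zero, so no explicit initialization is
		// needed before incrementing the per-destination-server count.
		appDistribution[a.Spec.Destination.Server]++
	}
	return appDistribution
}

Because a helper like this reads only what its callers hand it, it takes no sharding lock, which addresses the deadlock concern the deleted comment raised about calling GetAppDistribution from a DistributionFunction.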
36 changes: 19 additions & 17 deletions controller/sharding/sharding_test.go
@@ -988,35 +988,37 @@ func TestGetClusterSharding(t *testing.T) {
 }
 
 func TestAppAwareCache(t *testing.T) {
-	_, db, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
+	_, _, cluster1, cluster2, cluster3, cluster4, cluster5 := createTestClusters()
 	_, app1, app2, app3, app4, app5 := createTestApps()
 
-	clusterSharding := NewClusterSharding(db, 0, 1, "legacy")
+	clusterList := getClusterPointers([]v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5})
+	appList := getAppPointers([]v1alpha1.Application{app1, app2, app3, app4, app5})
 
-	clusterList := &v1alpha1.ClusterList{Items: []v1alpha1.Cluster{cluster1, cluster2, cluster3, cluster4, cluster5}}
-	appList := &v1alpha1.ApplicationList{Items: []v1alpha1.Application{app1, app2, app3, app4, app5}}
-	clusterSharding.Init(clusterList, appList)
+	getClusters := func() []*v1alpha1.Cluster { return clusterList }
+	getApps := func() []*v1alpha1.Application { return appList }
 
-	appDistribution := clusterSharding.GetAppDistribution()
+	appDistribution := getAppDistribution(getClusters, getApps)
 
-	assert.Equal(t, 2, appDistribution["cluster1"])
-	assert.Equal(t, 2, appDistribution["cluster2"])
-	assert.Equal(t, 1, appDistribution["cluster3"])
+	assert.Equal(t, int64(2), appDistribution["cluster1"])
+	assert.Equal(t, int64(2), appDistribution["cluster2"])
+	assert.Equal(t, int64(1), appDistribution["cluster3"])
 
 	app6 := createApp("app6", "cluster4")
-	clusterSharding.AddApp(&app6)
+	appList = append(appList, &app6)
 
 	app1Update := createApp("app1", "cluster2")
-	clusterSharding.UpdateApp(&app1Update)
+	// replace app 1
+	appList[0] = &app1Update
 
-	clusterSharding.DeleteApp(&app3)
+	// Remove app 3
+	appList = append(appList[:2], appList[3:]...)
 
-	appDistribution = clusterSharding.GetAppDistribution()
+	appDistribution = getAppDistribution(getClusters, getApps)
 
-	assert.Equal(t, 1, appDistribution["cluster1"])
-	assert.Equal(t, 2, appDistribution["cluster2"])
-	assert.Equal(t, 1, appDistribution["cluster3"])
-	assert.Equal(t, 1, appDistribution["cluster4"])
+	assert.Equal(t, int64(1), appDistribution["cluster1"])
+	assert.Equal(t, int64(2), appDistribution["cluster2"])
+	assert.Equal(t, int64(1), appDistribution["cluster3"])
+	assert.Equal(t, int64(1), appDistribution["cluster4"])
 }
 
 func createTestApps() (appAccessor, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application, v1alpha1.Application) {
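With the distribution computation decoupled from ClusterSharding, the test changes the expected distribution by mutating its local appList slice directly (appending, replacing, and slicing out entries) instead of calling AddApp, UpdateApp, and DeleteApp on the sharding cache, and each check simply recomputes the distribution from the getClusters/getApps closures.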
