diff --git a/felix/calc/calc_graph.go b/felix/calc/calc_graph.go index 7330926e058..265ab42fd1b 100644 --- a/felix/calc/calc_graph.go +++ b/felix/calc/calc_graph.go @@ -25,6 +25,7 @@ import ( "github.com/projectcalico/calico/felix/labelindex" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/serviceindex" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" "github.com/projectcalico/calico/libcalico-go/lib/net" @@ -78,9 +79,9 @@ type passthruCallbacks interface { OnIPPoolUpdate(model.IPPoolKey, *model.IPPool) OnIPPoolRemove(model.IPPoolKey) OnServiceAccountUpdate(*proto.ServiceAccountUpdate) - OnServiceAccountRemove(proto.ServiceAccountID) + OnServiceAccountRemove(types.ServiceAccountID) OnNamespaceUpdate(*proto.NamespaceUpdate) - OnNamespaceRemove(proto.NamespaceID) + OnNamespaceRemove(types.NamespaceID) OnWireguardUpdate(string, *model.Wireguard) OnWireguardRemove(string) OnGlobalBGPConfigUpdate(*v3.BGPConfiguration) diff --git a/felix/calc/encapsulation_resolver_test.go b/felix/calc/encapsulation_resolver_test.go index ea9beeb04ab..057a0b658e5 100644 --- a/felix/calc/encapsulation_resolver_test.go +++ b/felix/calc/encapsulation_resolver_test.go @@ -21,6 +21,7 @@ import ( "github.com/projectcalico/calico/felix/config" "github.com/projectcalico/calico/felix/dispatcher" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/encap" @@ -495,7 +496,7 @@ func (e *encapResolverCallbackRecorder) OnServiceAccountUpdate(update *proto.Ser Fail("ServiceAccountUpdate received") } -func (e *encapResolverCallbackRecorder) OnServiceAccountRemove(id proto.ServiceAccountID) { +func (e *encapResolverCallbackRecorder) OnServiceAccountRemove(id types.ServiceAccountID) { Fail("ServiceAccountRemove received") } @@ -503,7 +504,7 @@ func (e *encapResolverCallbackRecorder) OnNamespaceUpdate(update *proto.Namespac Fail("NamespaceUpdate received") } -func (e *encapResolverCallbackRecorder) OnNamespaceRemove(id proto.NamespaceID) { +func (e *encapResolverCallbackRecorder) OnNamespaceRemove(id types.NamespaceID) { Fail("NamespaceRemove received") } diff --git a/felix/calc/event_sequencer.go b/felix/calc/event_sequencer.go index ebe14aa3b18..f52daac6cd3 100644 --- a/felix/calc/event_sequencer.go +++ b/felix/calc/event_sequencer.go @@ -27,6 +27,7 @@ import ( "github.com/projectcalico/calico/felix/labelindex" "github.com/projectcalico/calico/felix/multidict" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" "github.com/projectcalico/calico/libcalico-go/lib/net" "github.com/projectcalico/calico/libcalico-go/lib/set" @@ -70,10 +71,10 @@ type EventSequencer struct { pendingNotReady bool pendingGlobalConfig map[string]string pendingHostConfig map[string]string - pendingServiceAccountUpdates map[proto.ServiceAccountID]*proto.ServiceAccountUpdate - pendingServiceAccountDeletes set.Set[proto.ServiceAccountID] - pendingNamespaceUpdates map[proto.NamespaceID]*proto.NamespaceUpdate - pendingNamespaceDeletes set.Set[proto.NamespaceID] + pendingServiceAccountUpdates map[types.ServiceAccountID]*proto.ServiceAccountUpdate + pendingServiceAccountDeletes set.Set[types.ServiceAccountID] + pendingNamespaceUpdates 
map[types.NamespaceID]*proto.NamespaceUpdate + pendingNamespaceDeletes set.Set[types.NamespaceID] pendingRouteUpdates map[routeID]*proto.RouteUpdate pendingRouteDeletes set.Set[routeID] pendingVTEPUpdates map[string]*proto.VXLANTunnelEndpointUpdate @@ -93,8 +94,8 @@ type EventSequencer struct { sentHostIPv6s set.Set[string] sentHosts set.Set[string] sentIPPools set.Set[ip.CIDR] - sentServiceAccounts set.Set[proto.ServiceAccountID] - sentNamespaces set.Set[proto.NamespaceID] + sentServiceAccounts set.Set[types.ServiceAccountID] + sentNamespaces set.Set[types.NamespaceID] sentRoutes set.Set[routeID] sentVTEPs set.Set[string] sentWireguard set.Set[string] @@ -149,10 +150,10 @@ func NewEventSequencer(conf configInterface) *EventSequencer { pendingHostMetadataDeletes: set.New[string](), pendingIPPoolUpdates: map[ip.CIDR]*model.IPPool{}, pendingIPPoolDeletes: set.New[ip.CIDR](), - pendingServiceAccountUpdates: map[proto.ServiceAccountID]*proto.ServiceAccountUpdate{}, - pendingServiceAccountDeletes: set.New[proto.ServiceAccountID](), - pendingNamespaceUpdates: map[proto.NamespaceID]*proto.NamespaceUpdate{}, - pendingNamespaceDeletes: set.New[proto.NamespaceID](), + pendingServiceAccountUpdates: map[types.ServiceAccountID]*proto.ServiceAccountUpdate{}, + pendingServiceAccountDeletes: set.New[types.ServiceAccountID](), + pendingNamespaceUpdates: map[types.NamespaceID]*proto.NamespaceUpdate{}, + pendingNamespaceDeletes: set.New[types.NamespaceID](), pendingRouteUpdates: map[routeID]*proto.RouteUpdate{}, pendingRouteDeletes: set.New[routeID](), pendingVTEPUpdates: map[string]*proto.VXLANTunnelEndpointUpdate{}, @@ -171,8 +172,8 @@ func NewEventSequencer(conf configInterface) *EventSequencer { sentHostIPv6s: set.New[string](), sentHosts: set.New[string](), sentIPPools: set.New[ip.CIDR](), - sentServiceAccounts: set.New[proto.ServiceAccountID](), - sentNamespaces: set.New[proto.NamespaceID](), + sentServiceAccounts: set.New[types.ServiceAccountID](), + sentNamespaces: set.New[types.NamespaceID](), sentRoutes: set.New[routeID](), sentVTEPs: set.New[string](), sentWireguard: set.New[string](), @@ -887,7 +888,7 @@ func (buf *EventSequencer) flushAddsOrRemoves(setID string) { func (buf *EventSequencer) OnServiceAccountUpdate(update *proto.ServiceAccountUpdate) { // We trust the caller not to send us an update with nil ID, so safe to dereference. 
- id := *update.Id + id := types.ProtoToServiceAccountID(update.Id) log.WithFields(log.Fields{ "key": id, "labels": update.GetLabels(), @@ -896,7 +897,7 @@ func (buf *EventSequencer) OnServiceAccountUpdate(update *proto.ServiceAccountUp buf.pendingServiceAccountUpdates[id] = update } -func (buf *EventSequencer) OnServiceAccountRemove(id proto.ServiceAccountID) { +func (buf *EventSequencer) OnServiceAccountRemove(id types.ServiceAccountID) { log.WithFields(log.Fields{ "key": id, }).Debug("ServiceAccount removed") @@ -908,8 +909,9 @@ func (buf *EventSequencer) OnServiceAccountRemove(id proto.ServiceAccountID) { func (buf *EventSequencer) flushServiceAccounts() { // Order doesn't matter, but send removes first to reduce max occupancy - buf.pendingServiceAccountDeletes.Iter(func(id proto.ServiceAccountID) error { - msg := proto.ServiceAccountRemove{Id: &id} + buf.pendingServiceAccountDeletes.Iter(func(id types.ServiceAccountID) error { + pid := types.ServiceAccountIDToProto(id) + msg := proto.ServiceAccountRemove{Id: pid} buf.Callback(&msg) buf.sentServiceAccounts.Discard(id) return nil @@ -917,18 +919,18 @@ func (buf *EventSequencer) flushServiceAccounts() { buf.pendingServiceAccountDeletes.Clear() for _, msg := range buf.pendingServiceAccountUpdates { buf.Callback(msg) - id := msg.Id + id := types.ProtoToServiceAccountID(msg.GetId()) // We safely dereferenced the Id in OnServiceAccountUpdate before adding it to the pending updates map, so // it is safe to do so here. - buf.sentServiceAccounts.Add(*id) + buf.sentServiceAccounts.Add(id) } - buf.pendingServiceAccountUpdates = make(map[proto.ServiceAccountID]*proto.ServiceAccountUpdate) + buf.pendingServiceAccountUpdates = make(map[types.ServiceAccountID]*proto.ServiceAccountUpdate) log.Debug("Done flushing Service Accounts") } func (buf *EventSequencer) OnNamespaceUpdate(update *proto.NamespaceUpdate) { // We trust the caller not to send us an update with nil ID, so safe to dereference. - id := *update.Id + id := types.ProtoToNamespaceID(update.GetId()) log.WithFields(log.Fields{ "key": id, "labels": update.GetLabels(), @@ -937,7 +939,7 @@ func (buf *EventSequencer) OnNamespaceUpdate(update *proto.NamespaceUpdate) { buf.pendingNamespaceUpdates[id] = update } -func (buf *EventSequencer) OnNamespaceRemove(id proto.NamespaceID) { +func (buf *EventSequencer) OnNamespaceRemove(id types.NamespaceID) { log.WithFields(log.Fields{ "key": id, }).Debug("Namespace removed") @@ -981,8 +983,9 @@ func (buf *EventSequencer) OnGlobalBGPConfigUpdate(cfg *v3.BGPConfiguration) { func (buf *EventSequencer) flushNamespaces() { // Order doesn't matter, but send removes first to reduce max occupancy - buf.pendingNamespaceDeletes.Iter(func(id proto.NamespaceID) error { - msg := proto.NamespaceRemove{Id: &id} + buf.pendingNamespaceDeletes.Iter(func(id types.NamespaceID) error { + pid := types.NamespaceIDToProto(id) + msg := proto.NamespaceRemove{Id: pid} buf.Callback(&msg) buf.sentNamespaces.Discard(id) return nil @@ -990,12 +993,12 @@ func (buf *EventSequencer) flushNamespaces() { buf.pendingNamespaceDeletes.Clear() for _, msg := range buf.pendingNamespaceUpdates { buf.Callback(msg) - id := msg.Id + id := types.ProtoToNamespaceID(msg.GetId()) // We safely dereferenced the Id in OnNamespaceUpdate before adding it to the pending updates map, so // it is safe to do so here. 
- buf.sentNamespaces.Add(*id) + buf.sentNamespaces.Add(id) } - buf.pendingNamespaceUpdates = make(map[proto.NamespaceID]*proto.NamespaceUpdate) + buf.pendingNamespaceUpdates = make(map[types.NamespaceID]*proto.NamespaceUpdate) log.Debug("Done flushing Namespaces") } diff --git a/felix/calc/profile_decoder.go b/felix/calc/profile_decoder.go index 63907e35494..66bffa5fff6 100644 --- a/felix/calc/profile_decoder.go +++ b/felix/calc/profile_decoder.go @@ -21,6 +21,7 @@ import ( "github.com/projectcalico/calico/felix/dispatcher" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" @@ -54,22 +55,24 @@ func (p *ProfileDecoder) OnUpdate(update api.Update) (filterOut bool) { switch id := idInterface.(type) { case nil: log.WithField("key", key.String()).Debug("Ignoring Profile labels") - case proto.ServiceAccountID: + case types.ServiceAccountID: + pid := types.ServiceAccountIDToProto(id) if update.Value == nil { p.callbacks.OnServiceAccountRemove(id) } else { labels := update.Value.(*apiv3.Profile).Spec.LabelsToApply msg := proto.ServiceAccountUpdate{ - Id: &id, Labels: decodeLabels(conversion.ServiceAccountLabelPrefix, labels)} + Id: pid, Labels: decodeLabels(conversion.ServiceAccountLabelPrefix, labels)} p.callbacks.OnServiceAccountUpdate(&msg) } - case proto.NamespaceID: + case types.NamespaceID: + pid := types.NamespaceIDToProto(id) if update.Value == nil { p.callbacks.OnNamespaceRemove(id) } else { labels := update.Value.(*apiv3.Profile).Spec.LabelsToApply msg := proto.NamespaceUpdate{ - Id: &id, Labels: decodeLabels(conversion.NamespaceLabelPrefix, labels)} + Id: pid, Labels: decodeLabels(conversion.NamespaceLabelPrefix, labels)} p.callbacks.OnNamespaceUpdate(&msg) } } @@ -79,11 +82,11 @@ func (p *ProfileDecoder) OnUpdate(update api.Update) (filterOut bool) { func (p *ProfileDecoder) classifyProfile(key model.ResourceKey) interface{} { namespace, name, err := p.converter.ProfileNameToServiceAccount(key.Name) if err == nil { - return proto.ServiceAccountID{Name: name, Namespace: namespace} + return types.ServiceAccountID{Name: name, Namespace: namespace} } name, err = p.converter.ProfileNameToNamespace(key.Name) if err == nil { - return proto.NamespaceID{Name: name} + return types.NamespaceID{Name: name} } return nil } diff --git a/felix/calc/profile_decoder_test.go b/felix/calc/profile_decoder_test.go index da320e53684..5fea8390af3 100644 --- a/felix/calc/profile_decoder_test.go +++ b/felix/calc/profile_decoder_test.go @@ -23,6 +23,7 @@ import ( "github.com/projectcalico/calico/felix/calc" "github.com/projectcalico/calico/felix/dispatcher" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/k8s/conversion" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" @@ -100,7 +101,7 @@ var _ = Describe("profileDecoder", func() { It("should send k8s service account profile remove", func() { update := removeUpdate(conversion.ServiceAccountProfileNamePrefix + "test_namespace.test_serviceaccount") uut.OnUpdate(update) - Expect(callbacks.saRemoves).To(Equal([]proto.ServiceAccountID{ + Expect(callbacks.saRemoves).To(Equal([]types.ServiceAccountID{ {Name: "test_serviceaccount", Namespace: 
"test_namespace"}, })) }) @@ -108,7 +109,7 @@ var _ = Describe("profileDecoder", func() { It("should send k8s namespace remove", func() { update := removeUpdate(conversion.NamespaceProfileNamePrefix + "test_namespace") uut.OnUpdate(update) - Expect(callbacks.nsRemoves).To(Equal([]proto.NamespaceID{ + Expect(callbacks.nsRemoves).To(Equal([]types.NamespaceID{ {Name: "test_namespace"}, })) }) @@ -133,9 +134,9 @@ var _ = Describe("profileDecoder", func() { type passthruCallbackRecorder struct { saUpdates []*proto.ServiceAccountUpdate - saRemoves []proto.ServiceAccountID + saRemoves []types.ServiceAccountID nsUpdates []*proto.NamespaceUpdate - nsRemoves []proto.NamespaceID + nsRemoves []types.NamespaceID } func (p *passthruCallbackRecorder) OnHostIPUpdate(hostname string, ip *net.IP) { @@ -182,7 +183,7 @@ func (p *passthruCallbackRecorder) OnServiceAccountUpdate(update *proto.ServiceA p.saUpdates = append(p.saUpdates, update) } -func (p *passthruCallbackRecorder) OnServiceAccountRemove(id proto.ServiceAccountID) { +func (p *passthruCallbackRecorder) OnServiceAccountRemove(id types.ServiceAccountID) { p.saRemoves = append(p.saRemoves, id) } @@ -190,7 +191,7 @@ func (p *passthruCallbackRecorder) OnNamespaceUpdate(update *proto.NamespaceUpda p.nsUpdates = append(p.nsUpdates, update) } -func (p *passthruCallbackRecorder) OnNamespaceRemove(id proto.NamespaceID) { +func (p *passthruCallbackRecorder) OnNamespaceRemove(id types.NamespaceID) { p.nsRemoves = append(p.nsRemoves, id) } diff --git a/felix/calc/rule_convert.go b/felix/calc/rule_convert.go index 52f5d9bba2c..3b765353ad6 100644 --- a/felix/calc/rule_convert.go +++ b/felix/calc/rule_convert.go @@ -19,6 +19,7 @@ import ( "encoding/base64" log "github.com/sirupsen/logrus" + googleproto "google.golang.org/protobuf/proto" "github.com/projectcalico/api/pkg/lib/numorstring" @@ -63,7 +64,7 @@ func fillInRuleIDs(rules []*proto.Rule, ruleIDSeed string) { // library. // TODO(smc) Can we do better than hashing the protobuf? rule.RuleId = "" - data, err := rule.Marshal() + data, err := googleproto.Marshal(rule) if err != nil { log.WithError(err).WithField("rule", rule).Panic("Failed to marshal rule") } diff --git a/felix/calc/state_test.go b/felix/calc/state_test.go index c76575d667d..961f972a320 100644 --- a/felix/calc/state_test.go +++ b/felix/calc/state_test.go @@ -22,6 +22,7 @@ import ( "github.com/projectcalico/calico/felix/dataplane/mock" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/api" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" "github.com/projectcalico/calico/libcalico-go/lib/set" @@ -35,10 +36,10 @@ type State struct { // than a map to give us a deterministic ordering of injection. 
DatastoreState []model.KVPair ExpectedIPSets map[string]set.Set[string] - ExpectedPolicyIDs set.Set[proto.PolicyID] - ExpectedUntrackedPolicyIDs set.Set[proto.PolicyID] - ExpectedPreDNATPolicyIDs set.Set[proto.PolicyID] - ExpectedProfileIDs set.Set[proto.ProfileID] + ExpectedPolicyIDs set.Set[types.PolicyID] + ExpectedUntrackedPolicyIDs set.Set[types.PolicyID] + ExpectedPreDNATPolicyIDs set.Set[types.PolicyID] + ExpectedProfileIDs set.Set[types.ProfileID] ExpectedRoutes set.Set[proto.RouteUpdate] ExpectedVTEPs set.Set[proto.VXLANTunnelEndpointUpdate] ExpectedWireguardEndpoints set.Set[proto.WireguardEndpointUpdate] @@ -62,10 +63,10 @@ func NewState() State { return State{ DatastoreState: []model.KVPair{}, ExpectedIPSets: make(map[string]set.Set[string]), - ExpectedPolicyIDs: set.New[proto.PolicyID](), - ExpectedUntrackedPolicyIDs: set.New[proto.PolicyID](), - ExpectedPreDNATPolicyIDs: set.New[proto.PolicyID](), - ExpectedProfileIDs: set.New[proto.ProfileID](), + ExpectedPolicyIDs: set.New[types.PolicyID](), + ExpectedUntrackedPolicyIDs: set.New[types.PolicyID](), + ExpectedPreDNATPolicyIDs: set.New[types.PolicyID](), + ExpectedProfileIDs: set.New[types.ProfileID](), ExpectedRoutes: set.New[proto.RouteUpdate](), ExpectedVTEPs: set.New[proto.VXLANTunnelEndpointUpdate](), ExpectedWireguardEndpoints: set.New[proto.WireguardEndpointUpdate](), @@ -188,9 +189,9 @@ func (s State) withName(name string) (newState State) { return newState } -func (s State) withActivePolicies(ids ...proto.PolicyID) (newState State) { +func (s State) withActivePolicies(ids ...types.PolicyID) (newState State) { newState = s.Copy() - newState.ExpectedPolicyIDs = set.New[proto.PolicyID]() + newState.ExpectedPolicyIDs = set.New[types.PolicyID]() for _, id := range ids { newState.ExpectedPolicyIDs.Add(id) } @@ -203,27 +204,27 @@ func (s State) withTotalALPPolicies(count int) (newState State) { return newState } -func (s State) withUntrackedPolicies(ids ...proto.PolicyID) (newState State) { +func (s State) withUntrackedPolicies(ids ...types.PolicyID) (newState State) { newState = s.Copy() - newState.ExpectedUntrackedPolicyIDs = set.New[proto.PolicyID]() + newState.ExpectedUntrackedPolicyIDs = set.New[types.PolicyID]() for _, id := range ids { newState.ExpectedUntrackedPolicyIDs.Add(id) } return newState } -func (s State) withPreDNATPolicies(ids ...proto.PolicyID) (newState State) { +func (s State) withPreDNATPolicies(ids ...types.PolicyID) (newState State) { newState = s.Copy() - newState.ExpectedPreDNATPolicyIDs = set.New[proto.PolicyID]() + newState.ExpectedPreDNATPolicyIDs = set.New[types.PolicyID]() for _, id := range ids { newState.ExpectedPreDNATPolicyIDs.Add(id) } return newState } -func (s State) withActiveProfiles(ids ...proto.ProfileID) (newState State) { +func (s State) withActiveProfiles(ids ...types.ProfileID) (newState State) { newState = s.Copy() - newState.ExpectedProfileIDs = set.New[proto.ProfileID]() + newState.ExpectedProfileIDs = set.New[types.ProfileID]() for _, id := range ids { newState.ExpectedProfileIDs.Add(id) } diff --git a/felix/calc/states_for_test.go b/felix/calc/states_for_test.go index fe4bacc4c79..418be9855f6 100644 --- a/felix/calc/states_for_test.go +++ b/felix/calc/states_for_test.go @@ -27,6 +27,7 @@ import ( "github.com/projectcalico/calico/felix/dataplane/mock" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) // Pre-defined datastore states. 
Each State object wraps up the complete state @@ -58,7 +59,7 @@ var withPolicy = initialisedStore.withKVUpdates( var withPolicyAlways = initialisedStore.withKVUpdates( pol1KVPairAlways, ).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withIPSet(allSelectorId, []string{}).withIPSet(bEqBSelectorId, []string{}).withName("with always-programmed policy") // withPolicyIngressOnly adds a tier and ingress policy containing selectors for all @@ -159,11 +160,11 @@ var localEp1WithPolicy = withPolicy.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -211,11 +212,11 @@ var localEp1WithNegatedNamedPortPolicy = empty.withKVUpdates( "fc00:fe11::1/128", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -255,9 +256,9 @@ var localHostEp1WithNamedPortPolicy = empty.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-1"}, ).withEndpoint( "named", []mock.TierInfo{ @@ -284,11 +285,11 @@ var localEp1WithIngressPolicy = withPolicyIngressOnly.withKVUpdates( "10.0.0.2/32", // ep1 and ep2 "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -325,11 +326,11 @@ var hostEp1WithPolicy = withPolicy.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( hostEpWithNameId, []mock.TierInfo{ @@ -345,11 +346,11 @@ var hostEp1WithIngressPolicy = withPolicyIngressOnly.withKVUpdates( "10.0.0.2/32", // ep1 and ep2 "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( 
hostEpWithNameId, []mock.TierInfo{ @@ -365,11 +366,11 @@ var hostEp1WithEgressPolicy = withPolicyEgressOnly.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( hostEpWithNameId, []mock.TierInfo{ @@ -390,13 +391,13 @@ var hostEp1WithUntrackedPolicy = withUntrackedPolicy.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withUntrackedPolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpointUntracked( hostEpWithNameId, []mock.TierInfo{}, @@ -414,13 +415,13 @@ var hostEp1WithPreDNATPolicy = withPreDNATPolicy.withKVUpdates( "10.0.0.2/32", // ep1 and ep2 "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pre-dnat-pol-1"}, + types.PolicyID{Tier: "default", Name: "pre-dnat-pol-1"}, ).withPreDNATPolicies( - proto.PolicyID{Tier: "default", Name: "pre-dnat-pol-1"}, + types.PolicyID{Tier: "default", Name: "pre-dnat-pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpointUntracked( hostEpWithNameId, []mock.TierInfo{}, @@ -433,8 +434,8 @@ var hostEp1WithPreDNATPolicy = withPreDNATPolicy.withKVUpdates( var hostEp1WithTrackedAndUntrackedPolicy = hostEp1WithUntrackedPolicy.withKVUpdates( KVPair{Key: PolicyKey{Name: "pol-2"}, Value: &policy1_order20}, ).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, - proto.PolicyID{Tier: "default", Name: "pol-2"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-2"}, ).withEndpointUntracked( hostEpWithNameId, []mock.TierInfo{ @@ -454,10 +455,10 @@ var hostEp2WithPolicy = withPolicy.withKVUpdates( "10.0.0.3/32", // ep2 "fc00:fe11::3/128", }).withIPSet(bEqBSelectorId, []string{}).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, ).withEndpoint( hostEpNoNameId, []mock.TierInfo{ @@ -508,13 +509,13 @@ func policyOrderState(policyOrders [3]float64, expectedOrder [3]string) State { "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, - proto.PolicyID{Tier: "default", Name: "pol-2"}, - proto.PolicyID{Tier: "default", Name: "pol-3"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-2"}, + types.PolicyID{Tier: "default", Name: "pol-3"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - 
proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -542,10 +543,10 @@ var localEp2WithPolicy = withPolicy.withKVUpdates( }).withIPSet( bEqBSelectorId, []string{}, ).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, ).withEndpoint( localWlEp2Id, []mock.TierInfo{ @@ -579,12 +580,12 @@ var localEpsWithPolicy = withPolicy.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "pol-1"}, + types.PolicyID{Tier: "default", Name: "pol-1"}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -663,10 +664,10 @@ var localEpsWithOverlappingIPsAndInheritedLabels = empty.withKVUpdates( localWlEp2Id, []mock.TierInfo{}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-missing"}, ).withRoutes( // Routes for the local WEPs. routelocalWlTenDotOne, @@ -682,7 +683,7 @@ var localEpsWithOverlappingIPsAndInheritedLabels = empty.withKVUpdates( var localEpsAndNamedPortPolicyMatchingInheritedLabelOnEP1 = localEpsWithOverlappingIPsAndInheritedLabels.withKVUpdates( KVPair{Key: PolicyKey{Name: "inherit-pol"}, Value: &policy_with_named_port_inherit}, ).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "inherit-pol"}, + types.PolicyID{Tier: "default", Name: "inherit-pol"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{{ @@ -767,9 +768,9 @@ var localEpsAndNamedPortPolicyEP2ProfileRemoved = localEpsAndNamedPortPolicyMatc "fc00:fe11::2,tcp:8080", // ep2 no longer matches }).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withName("2 local WEPs with policy matching inherited label on WEP1; WEP2 has no profile") // Then do the same for EP1. 
@@ -865,10 +866,10 @@ var localEpsWithProfile = withProfile.withKVUpdates( "10.0.0.2/32", "fc00:fe11::2/128", }).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{}, @@ -953,10 +954,10 @@ var localEpsWithTagInheritProfile = withProfileTagInherit.withKVUpdates( }).withIPSet( tagFoobarSelectorId, []string{}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{}, ).withEndpoint( @@ -993,10 +994,10 @@ var localEpsWithTagOverriddenProfile = withProfileTagOverridden.withKVUpdates( "10.0.0.2/32", // ep1 and ep2 "fc00:fe11::2/128", }).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-3"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-3"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{}, @@ -1546,9 +1547,9 @@ var vxlanLocalBlockWithBorrowsLocalWEP = vxlanLocalBlockWithBorrows.withKVUpdate routelocalWlV6ColonOne, routelocalWlV6ColonTwo, ).withName("VXLAN local with borrows with local WEP override").withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withEndpoint("orch/wl1/ep1", []mock.TierInfo{}) // As vxlanLocalBlockWithBorrows but using Node resources instead of host IPs. @@ -2482,9 +2483,9 @@ var endpointSliceAndLocalWorkload = empty.withKVUpdates( localWlEp1Id, []mock.TierInfo{}, ).withActiveProfiles( - proto.ProfileID{Name: "prof-1"}, - proto.ProfileID{Name: "prof-2"}, - proto.ProfileID{Name: "prof-missing"}, + types.ProfileID{Name: "prof-1"}, + types.ProfileID{Name: "prof-2"}, + types.ProfileID{Name: "prof-missing"}, ).withName("EndpointSliceInactive") // Add a network policy that makes the endpoint slice active. 
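At the boundaries where these IDs enter or leave protobuf messages, the hunks call conversion helpers such as types.ProtoToServiceAccountID and types.ServiceAccountIDToProto. A minimal sketch of one such pair, with signatures inferred from the call sites in event_sequencer.go and profile_decoder.go (the real helpers live in felix/types and may differ in detail):

package types

import "github.com/projectcalico/calico/felix/proto"

// ProtoToServiceAccountID copies the identifying fields out of the protobuf
// message into the comparable value type used for indexing.
func ProtoToServiceAccountID(p *proto.ServiceAccountID) ServiceAccountID {
	return ServiceAccountID{Name: p.GetName(), Namespace: p.GetNamespace()}
}

// ServiceAccountIDToProto builds a fresh protobuf message from the value type,
// e.g. when emitting a proto.ServiceAccountRemove.
func ServiceAccountIDToProto(id ServiceAccountID) *proto.ServiceAccountID {
	return &proto.ServiceAccountID{Name: id.Name, Namespace: id.Namespace}
}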
@@ -2493,7 +2494,7 @@ var endpointSliceActive = endpointSliceAndLocalWorkload.withKVUpdates( ).withName("EndpointSliceActive").withIPSet("svc:Jhwii46PCMT5NlhWsUqZmv7al8TeHFbNQMhoVg", []string{ "10.0.0.1,tcp:80", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "svc-policy"}, + types.PolicyID{Tier: "default", Name: "svc-policy"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -2542,7 +2543,7 @@ var endpointSliceActiveSpecNoPorts = endpointSliceAndLocalWorkload.withKVUpdates ).withName("EndpointSliceActiveNoPorts").withIPSet("svcnoport:T03S_6hogdrGKrNFBcbKTFsH_uKwDHEo8JddOg", []string{ "10.0.0.1/32", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "svc-policy"}, + types.PolicyID{Tier: "default", Name: "svc-policy"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ @@ -2558,8 +2559,8 @@ var endpointSliceActiveSpecPortsAndNoPorts = endpointSliceActiveSpecNoPorts.with ).withIPSet("svc:Jhwii46PCMT5NlhWsUqZmv7al8TeHFbNQMhoVg", []string{ "10.0.0.1,tcp:80", }).withActivePolicies( - proto.PolicyID{Tier: "default", Name: "svc-policy"}, - proto.PolicyID{Tier: "default", Name: "svc-policy2"}, + types.PolicyID{Tier: "default", Name: "svc-policy"}, + types.PolicyID{Tier: "default", Name: "svc-policy2"}, ).withEndpoint( localWlEp1Id, []mock.TierInfo{ diff --git a/felix/dataplane/common/callbacks.go b/felix/dataplane/common/callbacks.go index d4afdb763f3..d134f671e91 100644 --- a/felix/dataplane/common/callbacks.go +++ b/felix/dataplane/common/callbacks.go @@ -16,6 +16,7 @@ package common import ( "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) type Callbacks struct { @@ -51,13 +52,13 @@ type CbID struct { dropper func() } -type AddInterfaceFunc func(ifaceName string, hostEPID proto.HostEndpointID) +type AddInterfaceFunc func(ifaceName string, hostEPID types.HostEndpointID) type AddInterfaceFuncs struct { fs AddInterfaceFunc } -func (fs *AddInterfaceFuncs) Invoke(ifaceName string, hostEPID proto.HostEndpointID) { +func (fs *AddInterfaceFuncs) Invoke(ifaceName string, hostEPID types.HostEndpointID) { if fs.fs != nil { fs.fs(ifaceName, hostEPID) } @@ -103,13 +104,13 @@ func (fs *RemoveInterfaceFuncs) Append(f RemoveInterfaceFunc) *CbID { } } -type UpdateInterfaceFunc func(ifaceName string, newHostEPID proto.HostEndpointID) +type UpdateInterfaceFunc func(ifaceName string, newHostEPID types.HostEndpointID) type UpdateInterfaceFuncs struct { fs UpdateInterfaceFunc } -func (fs *UpdateInterfaceFuncs) Invoke(ifaceName string, newHostEPID proto.HostEndpointID) { +func (fs *UpdateInterfaceFuncs) Invoke(ifaceName string, newHostEPID types.HostEndpointID) { if fs.fs != nil { fs.fs(ifaceName, newHostEPID) } @@ -129,13 +130,13 @@ func (fs *UpdateInterfaceFuncs) Append(f UpdateInterfaceFunc) *CbID { } } -type UpdateHostEndpointFunc func(hostEPID proto.HostEndpointID) +type UpdateHostEndpointFunc func(hostEPID types.HostEndpointID) type UpdateHostEndpointFuncs struct { fs UpdateHostEndpointFunc } -func (fs *UpdateHostEndpointFuncs) Invoke(hostEPID proto.HostEndpointID) { +func (fs *UpdateHostEndpointFuncs) Invoke(hostEPID types.HostEndpointID) { if fs.fs != nil { fs.fs(hostEPID) } @@ -155,13 +156,13 @@ func (fs *UpdateHostEndpointFuncs) Append(f UpdateHostEndpointFunc) *CbID { } } -type RemoveHostEndpointFunc func(hostEPID proto.HostEndpointID) +type RemoveHostEndpointFunc func(hostEPID types.HostEndpointID) type RemoveHostEndpointFuncs struct { fs RemoveHostEndpointFunc } -func (fs *RemoveHostEndpointFuncs) Invoke(hostEPID proto.HostEndpointID) 
{ +func (fs *RemoveHostEndpointFuncs) Invoke(hostEPID types.HostEndpointID) { if fs.fs != nil { fs.fs(hostEPID) } diff --git a/felix/dataplane/linux/bpf_ep_mgr.go b/felix/dataplane/linux/bpf_ep_mgr.go index 36e3857e61b..7f3a543fea7 100644 --- a/felix/dataplane/linux/bpf_ep_mgr.go +++ b/felix/dataplane/linux/bpf_ep_mgr.go @@ -39,6 +39,7 @@ import ( "time" "github.com/projectcalico/calico/felix/ethtool" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/health" "github.com/prometheus/client_golang/prometheus" @@ -214,7 +215,7 @@ var zeroIface bpfInterface = func() bpfInterface { type bpfInterfaceInfo struct { ifIndex int isUP bool - endpointID *proto.WorkloadEndpointID + endpointID *types.WorkloadEndpointID ifaceType IfaceType masterIfIndex int } @@ -272,15 +273,15 @@ type bpfEndpointManager struct { ifacesLock sync.Mutex nameToIface map[string]bpfInterface - allWEPs map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint - happyWEPs map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + allWEPs map[types.WorkloadEndpointID]*proto.WorkloadEndpoint + happyWEPs map[types.WorkloadEndpointID]*proto.WorkloadEndpoint happyWEPsDirty bool - policies map[proto.PolicyID]*proto.Policy - profiles map[proto.ProfileID]*proto.Profile + policies map[types.PolicyID]*proto.Policy + profiles map[types.ProfileID]*proto.Profile // Indexes - policiesToWorkloads map[proto.PolicyID]set.Set[any] /* FIXME proto.WorkloadEndpointID or string (for a HEP) */ - profilesToWorkloads map[proto.ProfileID]set.Set[any] /* FIXME proto.WorkloadEndpointID or string (for a HEP) */ + policiesToWorkloads map[types.PolicyID]set.Set[any] /* FIXME types.WorkloadEndpointID or string (for a HEP) */ + profilesToWorkloads map[types.ProfileID]set.Set[any] /* FIXME types.WorkloadEndpointID or string (for a HEP) */ dirtyIfaceNames set.Set[string] @@ -399,7 +400,7 @@ type serviceKey struct { } type bpfAllowChainRenderer interface { - WorkloadInterfaceAllowChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain + WorkloadInterfaceAllowChains(endpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain } type ManagerWithHEPUpdate interface { @@ -464,14 +465,14 @@ func newBPFEndpointManager( m := &bpfEndpointManager{ initUnknownIfaces: set.New[string](), dp: dp, - allWEPs: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, - happyWEPs: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + allWEPs: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + happyWEPs: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, happyWEPsDirty: true, - policies: map[proto.PolicyID]*proto.Policy{}, - profiles: map[proto.ProfileID]*proto.Profile{}, + policies: map[types.PolicyID]*proto.Policy{}, + profiles: map[types.ProfileID]*proto.Profile{}, nameToIface: map[string]bpfInterface{}, - policiesToWorkloads: map[proto.PolicyID]set.Set[any]{}, - profilesToWorkloads: map[proto.ProfileID]set.Set[any]{}, + policiesToWorkloads: map[types.PolicyID]set.Set[any]{}, + profilesToWorkloads: map[types.ProfileID]set.Set[any]{}, dirtyIfaceNames: set.New[string](), bpfLogLevel: config.BPFLogLevel, hostname: config.Hostname, @@ -1188,7 +1189,7 @@ func (m *bpfEndpointManager) onInterfaceUpdate(update *ifaceStateUpdate) { // workloads using that policy. 
func (m *bpfEndpointManager) onWorkloadEndpointUpdate(msg *proto.WorkloadEndpointUpdate) { log.WithField("wep", msg.Endpoint).Debug("Workload endpoint update") - wlID := *msg.Id + wlID := types.ProtoToWorkloadEndpointID(msg.GetId()) oldWEP := m.allWEPs[wlID] m.removeWEPFromIndexes(wlID, oldWEP) @@ -1203,7 +1204,7 @@ func (m *bpfEndpointManager) onWorkloadEndpointUpdate(msg *proto.WorkloadEndpoin // onWorkloadEndpointRemove removes the workload from the cache and the index, which maps from policy to workload. func (m *bpfEndpointManager) onWorkloadEndpointRemove(msg *proto.WorkloadEndpointRemove) { - wlID := *msg.Id + wlID := types.ProtoToWorkloadEndpointID(msg.GetId()) log.WithField("id", wlID).Debug("Workload endpoint removed") oldWEP := m.allWEPs[wlID] m.removeWEPFromIndexes(wlID, oldWEP) @@ -1224,7 +1225,7 @@ func (m *bpfEndpointManager) onWorkloadEndpointRemove(msg *proto.WorkloadEndpoin // onPolicyUpdate stores the policy in the cache and marks any endpoints using it dirty. func (m *bpfEndpointManager) onPolicyUpdate(msg *proto.ActivePolicyUpdate) { - polID := *msg.Id + polID := types.ProtoToPolicyID(msg.GetId()) log.WithField("id", polID).Debug("Policy update") m.policies[polID] = msg.Policy m.markEndpointsDirty(m.policiesToWorkloads[polID], "policy") @@ -1236,7 +1237,7 @@ func (m *bpfEndpointManager) onPolicyUpdate(msg *proto.ActivePolicyUpdate) { // onPolicyRemove removes the policy from the cache and marks any endpoints using it dirty. // The latter should be a no-op due to the ordering guarantees of the calc graph. func (m *bpfEndpointManager) onPolicyRemove(msg *proto.ActivePolicyRemove) { - polID := *msg.Id + polID := types.ProtoToPolicyID(msg.GetId()) log.WithField("id", polID).Debug("Policy removed") m.markEndpointsDirty(m.policiesToWorkloads[polID], "policy") delete(m.policies, polID) @@ -1249,7 +1250,7 @@ func (m *bpfEndpointManager) onPolicyRemove(msg *proto.ActivePolicyRemove) { // onProfileUpdate stores the profile in the cache and marks any endpoints that use it as dirty. func (m *bpfEndpointManager) onProfileUpdate(msg *proto.ActiveProfileUpdate) { - profID := *msg.Id + profID := types.ProtoToProfileID(msg.GetId()) log.WithField("id", profID).Debug("Profile update") m.profiles[profID] = msg.Profile m.markEndpointsDirty(m.profilesToWorkloads[profID], "profile") @@ -1261,7 +1262,7 @@ func (m *bpfEndpointManager) onProfileUpdate(msg *proto.ActiveProfileUpdate) { // onProfileRemove removes the profile from the cache and marks any endpoints that were using it as dirty. // The latter should be a no-op due to the ordering guarantees of the calc graph. 
func (m *bpfEndpointManager) onProfileRemove(msg *proto.ActiveProfileRemove) { - profID := *msg.Id + profID := types.ProtoToProfileID(msg.GetId()) log.WithField("id", profID).Debug("Profile removed") m.markEndpointsDirty(m.profilesToWorkloads[profID], "profile") delete(m.profiles, profID) @@ -1293,7 +1294,7 @@ func (m *bpfEndpointManager) markEndpointsDirty(ids set.Set[any], kind string) { } ids.Iter(func(item any) error { switch id := item.(type) { - case proto.WorkloadEndpointID: + case types.WorkloadEndpointID: m.markExistingWEPDirty(id, kind) case string: if id == allInterfaces { @@ -1312,7 +1313,7 @@ func (m *bpfEndpointManager) markEndpointsDirty(ids set.Set[any], kind string) { }) } -func (m *bpfEndpointManager) markExistingWEPDirty(wlID proto.WorkloadEndpointID, mapping string) { +func (m *bpfEndpointManager) markExistingWEPDirty(wlID types.WorkloadEndpointID, mapping string) { wep := m.allWEPs[wlID] if wep == nil { log.WithField("wlID", wlID).Panicf( @@ -1946,7 +1947,7 @@ func (m *bpfEndpointManager) updateWEPsInDataplane() { wg.Wait() for ifaceName, err := range errs { - var wlID *proto.WorkloadEndpointID + var wlID *types.WorkloadEndpointID m.withIface(ifaceName, func(iface *bpfInterface) bool { wlID = iface.info.endpointID @@ -2116,7 +2117,7 @@ func (m *bpfEndpointManager) doApplyPolicy(ifaceName string) (bpfInterfaceState, var ( state bpfInterfaceState - endpointID *proto.WorkloadEndpointID + endpointID *types.WorkloadEndpointID ifaceUp bool ifindex int ) @@ -2849,7 +2850,7 @@ func (m *bpfEndpointManager) extractTiers(tier *proto.TierInfo, direction PolDir } for i, polName := range directionalPols { - pol := m.policies[proto.PolicyID{Tier: tier.Name, Name: polName}] + pol := m.policies[types.PolicyID{Tier: tier.Name, Name: polName}] if pol == nil { log.WithField("tier", tier).Warn("Tier refers to unknown policy!") continue @@ -2892,7 +2893,7 @@ func (m *bpfEndpointManager) extractProfiles(profileNames []string, direction Po rProfiles = make([]polprog.Profile, count) for i, profName := range profileNames { - prof := m.profiles[proto.ProfileID{Name: profName}] + prof := m.profiles[types.ProfileID{Name: profName}] var prules []*proto.Rule if direction == PolDirnIngress { prules = prof.InboundRules @@ -2945,7 +2946,7 @@ func (m *bpfEndpointManager) isL3Iface(iface string) bool { return m.l3IfaceRegex.MatchString(iface) } -func (m *bpfEndpointManager) addWEPToIndexes(wlID proto.WorkloadEndpointID, wl *proto.WorkloadEndpoint) { +func (m *bpfEndpointManager) addWEPToIndexes(wlID types.WorkloadEndpointID, wl *proto.WorkloadEndpoint) { for _, t := range wl.Tiers { m.addPolicyToEPMappings(t.IngressPolicies, wlID) m.addPolicyToEPMappings(t.EgressPolicies, wlID) @@ -2955,7 +2956,7 @@ func (m *bpfEndpointManager) addWEPToIndexes(wlID proto.WorkloadEndpointID, wl * func (m *bpfEndpointManager) addPolicyToEPMappings(polNames []string, id interface{}) { for _, pol := range polNames { - polID := proto.PolicyID{ + polID := types.PolicyID{ Tier: "default", Name: pol, } @@ -2968,7 +2969,7 @@ func (m *bpfEndpointManager) addPolicyToEPMappings(polNames []string, id interfa func (m *bpfEndpointManager) addProfileToEPMappings(profileIds []string, id interface{}) { for _, profName := range profileIds { - profID := proto.ProfileID{Name: profName} + profID := types.ProfileID{Name: profName} profSet := m.profilesToWorkloads[profID] if profSet == nil { profSet = set.New[any]() @@ -2978,7 +2979,7 @@ func (m *bpfEndpointManager) addProfileToEPMappings(profileIds []string, id inte } } -func (m 
*bpfEndpointManager) removeWEPFromIndexes(wlID proto.WorkloadEndpointID, wep *proto.WorkloadEndpoint) { +func (m *bpfEndpointManager) removeWEPFromIndexes(wlID types.WorkloadEndpointID, wep *proto.WorkloadEndpoint) { if wep == nil { return } @@ -2998,7 +2999,7 @@ func (m *bpfEndpointManager) removeWEPFromIndexes(wlID proto.WorkloadEndpointID, func (m *bpfEndpointManager) removePolicyToEPMappings(polNames []string, id interface{}) { for _, pol := range polNames { - polID := proto.PolicyID{ + polID := types.PolicyID{ Tier: "default", Name: pol, } @@ -3016,7 +3017,7 @@ func (m *bpfEndpointManager) removePolicyToEPMappings(polNames []string, id inte func (m *bpfEndpointManager) removeProfileToEPMappings(profileIds []string, id any) { for _, profName := range profileIds { - profID := proto.ProfileID{Name: profName} + profID := types.ProfileID{Name: profName} profSet := m.profilesToWorkloads[profID] if profSet == nil { continue diff --git a/felix/dataplane/linux/bpf_ep_mgr_test.go b/felix/dataplane/linux/bpf_ep_mgr_test.go index 6fb55c8ddef..45d12a5141e 100644 --- a/felix/dataplane/linux/bpf_ep_mgr_test.go +++ b/felix/dataplane/linux/bpf_ep_mgr_test.go @@ -58,6 +58,7 @@ import ( mocknetlink "github.com/projectcalico/calico/felix/netlinkshim/mocknetlink" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -852,7 +853,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("stores host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap["eth0"]).To(Equal(hostEp)) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).To(HaveKey("eth0")) @@ -902,7 +903,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("stores host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap["eth0"]).To(Equal(hostEp)) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).To(HaveKey("eth0")) @@ -921,7 +922,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("stores host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap["eth0"]).To(Equal(hostEp)) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).To(HaveKey("eth0")) @@ -940,7 +941,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("stores host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap["eth0"]).To(Equal(hostEp)) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).To(HaveKey("eth0")) @@ -951,7 +952,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("clears host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap).To(BeEmpty()) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).NotTo(HaveKey("eth0")) @@ -962,7 +963,7 @@ var _ = Describe("BPF Endpoint Manager", func() { It("clears host endpoint for eth0", func() { Expect(bpfEpMgr.hostIfaceToEpMap).To(BeEmpty()) - Expect(bpfEpMgr.policiesToWorkloads[proto.PolicyID{ + Expect(bpfEpMgr.policiesToWorkloads[types.PolicyID{ Tier: "default", Name: "mypolicy", }]).NotTo(HaveKey("eth0")) diff --git a/felix/dataplane/linux/bpf_route_mgr.go 
b/felix/dataplane/linux/bpf_route_mgr.go index c6dc366ee6b..6295cf449ae 100644 --- a/felix/dataplane/linux/bpf_route_mgr.go +++ b/felix/dataplane/linux/bpf_route_mgr.go @@ -20,6 +20,7 @@ import ( "time" log "github.com/sirupsen/logrus" + googleproto "google.golang.org/protobuf/proto" "github.com/projectcalico/calico/felix/bpf/bpfmap" "github.com/projectcalico/calico/felix/bpf/maps" @@ -28,6 +29,7 @@ import ( "github.com/projectcalico/calico/felix/ip" "github.com/projectcalico/calico/felix/logutils" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -63,15 +65,15 @@ type bpfRouteManager struct { // cidrToLocalIfaces maps from (/32) CIDR to the set of interfaces that have that CIDR cidrToLocalIfaces map[ip.CIDR]set.Set[string] localIfaceToCIDRs map[string]set.Set[ip.CIDR] - // cidrToWEPIDs maps from (/32) CIDR to the set of local proto.WorkloadEndpointIDs that have that CIDR. - cidrToWEPIDs map[ip.CIDR]set.Set[proto.WorkloadEndpointID] + // cidrToWEPIDs maps from (/32) CIDR to the set of local types.WorkloadEndpointIDs that have that CIDR. + cidrToWEPIDs map[ip.CIDR]set.Set[types.WorkloadEndpointID] // wepIDToWorkload contains all the local workloads. - wepIDToWorkload map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + wepIDToWorkload map[types.WorkloadEndpointID]*proto.WorkloadEndpoint // ifaceNameToIdx maps local interface name to interface ID. ifaceNameToIdx map[string]int - // ifaceNameToWEPIDs maps local interface name to the set of local proto.WorkloadEndpointIDs that have that name. + // ifaceNameToWEPIDs maps local interface name to the set of local types.WorkloadEndpointIDs that have that name. // (Usually a single WEP). - ifaceNameToWEPIDs map[string]set.Set[proto.WorkloadEndpointID] + ifaceNameToWEPIDs map[string]set.Set[types.WorkloadEndpointID] // externalNodeCIDRs is a set of CIDRs that should be treated as external nodes (and hence we should allow // IPIP and VXLAN to/from them). externalNodeCIDRs set.Set[ip.CIDR] @@ -146,10 +148,10 @@ func newBPFRouteManager(config *Config, maps *bpfmap.IPMaps, ipFamily proto.IPVe cidrToRoute: map[ip.CIDR]proto.RouteUpdate{}, cidrToLocalIfaces: map[ip.CIDR]set.Set[string]{}, localIfaceToCIDRs: map[string]set.Set[ip.CIDR]{}, - cidrToWEPIDs: map[ip.CIDR]set.Set[proto.WorkloadEndpointID]{}, - wepIDToWorkload: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + cidrToWEPIDs: map[ip.CIDR]set.Set[types.WorkloadEndpointID]{}, + wepIDToWorkload: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, ifaceNameToIdx: map[string]int{}, - ifaceNameToWEPIDs: map[string]set.Set[proto.WorkloadEndpointID]{}, + ifaceNameToWEPIDs: map[string]set.Set[types.WorkloadEndpointID]{}, externalNodeCIDRs: extCIDRs, dirtyCIDRs: dirtyCIDRs, dsrOptoutCIDRs: noDsrCIDRs, @@ -330,13 +332,13 @@ func (m *bpfRouteManager) calculateRoute(cidr ip.CIDR) routes.ValueInterface { } if wepIDs, ok := m.cidrToWEPIDs[cidr]; ok { bestWepScore := -1 - var bestWepID proto.WorkloadEndpointID + var bestWepID *proto.WorkloadEndpointID if wepIDs.Len() > 1 { log.WithField("cidr", cidr).Warn( "Multiple local workloads with same IP but BPF dataplane only supports single route. " + "Will choose one route.") } - wepIDs.Iter(func(wepID proto.WorkloadEndpointID) error { + wepIDs.Iter(func(wepID types.WorkloadEndpointID) error { // Route is a local workload look up its name and interface details. 
wepScore := 0 wep := m.wepIDToWorkload[wepID] @@ -345,10 +347,11 @@ func (m *bpfRouteManager) calculateRoute(cidr ip.CIDR) routes.ValueInterface { if ok { wepScore++ } - if wepScore > bestWepScore || wepScore == bestWepScore && wepID.String() > bestWepID.String() { + pWepID := types.WorkloadEndpointIDToProto(wepID) + if wepScore > bestWepScore || wepScore == bestWepScore && pWepID.String() > bestWepID.String() { flags |= routes.FlagsLocalWorkload route = m.bpfOps.NewValueWithIfIndex(flags, ifaceIdx) - bestWepID = wepID + bestWepID = pWepID bestWepScore = wepScore } return nil @@ -503,7 +506,7 @@ func (m *bpfRouteManager) onIfaceIdxChanged(name string) { if wepIDs == nil { return } - wepIDs.Iter(func(wepID proto.WorkloadEndpointID) error { + wepIDs.Iter(func(wepID types.WorkloadEndpointID) error { wep := m.wepIDToWorkload[wepID] cidrs := m.getWorkloadCIDRs(wep) m.markCIDRsDirty(cidrs...) @@ -597,7 +600,8 @@ func (m *bpfRouteManager) onRouteUpdate(update *proto.RouteUpdate) { return } - if m.cidrToRoute[cidr] == *update { + cur := m.cidrToRoute[cidr] + if googleproto.Equal(&cur, update) { return } @@ -619,34 +623,37 @@ func (m *bpfRouteManager) onRouteRemove(update *proto.RouteRemove) { func (m *bpfRouteManager) onWorkloadEndpointUpdate(update *proto.WorkloadEndpointUpdate) { // Clean up the indexes for any old WEPs that had this ID. - m.removeWEP(update.Id) + id := types.ProtoToWorkloadEndpointID(update.GetId()) + m.removeWEP(&id) // Update the indexes to add this WEP. m.addWEP(update) } func (m *bpfRouteManager) addWEP(update *proto.WorkloadEndpointUpdate) { - m.wepIDToWorkload[*update.Id] = update.Endpoint + id := types.ProtoToWorkloadEndpointID(update.GetId()) + m.wepIDToWorkload[id] = update.Endpoint newCIDRs := m.getWorkloadCIDRs(update.Endpoint) for _, cidr := range newCIDRs { wepIDs := m.cidrToWEPIDs[cidr] if wepIDs == nil { - wepIDs = set.New[proto.WorkloadEndpointID]() + wepIDs = set.New[types.WorkloadEndpointID]() m.cidrToWEPIDs[cidr] = wepIDs } - wepIDs.Add(*update.Id) + wepIDs.Add(id) } m.markCIDRsDirty(newCIDRs...) 
wepIDs := m.ifaceNameToWEPIDs[update.Endpoint.Name] if wepIDs == nil { - wepIDs = set.New[proto.WorkloadEndpointID]() + wepIDs = set.New[types.WorkloadEndpointID]() m.ifaceNameToWEPIDs[update.Endpoint.Name] = wepIDs } - wepIDs.Add(*update.Id) + wepIDs.Add(id) } func (m *bpfRouteManager) onWorkloadEndpointRemove(update *proto.WorkloadEndpointRemove) { - m.removeWEP(update.Id) + id := types.ProtoToWorkloadEndpointID(update.GetId()) + m.removeWEP(&id) } func (m *bpfRouteManager) onBGPConfigUpdate(update *proto.GlobalBGPConfigUpdate) { @@ -686,7 +693,7 @@ func (m *bpfRouteManager) onBGPConfigUpdate(update *proto.GlobalBGPConfigUpdate) }) } -func (m *bpfRouteManager) removeWEP(id *proto.WorkloadEndpointID) { +func (m *bpfRouteManager) removeWEP(id *types.WorkloadEndpointID) { oldWEP := m.wepIDToWorkload[*id] if oldWEP == nil { return diff --git a/felix/dataplane/linux/endpoint_mgr.go b/felix/dataplane/linux/endpoint_mgr.go index 4cc89b59307..224da6f66da 100644 --- a/felix/dataplane/linux/endpoint_mgr.go +++ b/felix/dataplane/linux/endpoint_mgr.go @@ -33,6 +33,7 @@ import ( "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/routetable" "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -74,7 +75,7 @@ func newEndpointManagerCallbacks(callbacks *common.Callbacks, ipVersion uint8) e } } -func (c *endpointManagerCallbacks) InvokeInterfaceCallbacks(old, new map[string]proto.HostEndpointID) { +func (c *endpointManagerCallbacks) InvokeInterfaceCallbacks(old, new map[string]types.HostEndpointID) { for ifaceName, oldEpID := range old { if newEpID, ok := new[ifaceName]; ok { if oldEpID != newEpID { @@ -91,11 +92,11 @@ func (c *endpointManagerCallbacks) InvokeInterfaceCallbacks(old, new map[string] } } -func (c *endpointManagerCallbacks) InvokeUpdateHostEndpoint(hostEpID proto.HostEndpointID) { +func (c *endpointManagerCallbacks) InvokeUpdateHostEndpoint(hostEpID types.HostEndpointID) { c.updateHostEndpoint.Invoke(hostEpID) } -func (c *endpointManagerCallbacks) InvokeRemoveHostEndpoint(hostEpID proto.HostEndpointID) { +func (c *endpointManagerCallbacks) InvokeRemoveHostEndpoint(hostEpID types.HostEndpointID) { c.removeHostEndpoint.Invoke(hostEpID) } @@ -136,25 +137,25 @@ type endpointManager struct { // Pending updates, cleared in CompleteDeferredWork as the data is copied to the activeXYZ // fields. - pendingWlEpUpdates map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + pendingWlEpUpdates map[types.WorkloadEndpointID]*proto.WorkloadEndpoint pendingIfaceUpdates map[string]ifacemonitor.State - dirtyPolicyIDs set.Set[proto.PolicyID] + dirtyPolicyIDs set.Set[types.PolicyID] // Active state, updated in CompleteDeferredWork. 
- activeWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint - activeWlIfaceNameToID map[string]proto.WorkloadEndpointID + activeWlEndpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint + activeWlIfaceNameToID map[string]types.WorkloadEndpointID activeUpIfaces set.Set[string] - activeWlIDToChains map[proto.WorkloadEndpointID][]*iptables.Chain + activeWlIDToChains map[types.WorkloadEndpointID][]*iptables.Chain activeWlDispatchChains map[string]*iptables.Chain activeEPMarkDispatchChains map[string]*iptables.Chain ifaceNameToPolicyGroupChainNames map[string][]string /*chain name*/ - activePolicySelectors map[proto.PolicyID]string + activePolicySelectors map[types.PolicyID]string policyChainRefCounts map[string]int // Chain name to count. // Workload endpoints that would be locally active but are 'shadowed' by other endpoints // with the same interface name. - shadowedWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + shadowedWlEndpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint // wlIfaceNamesToReconfigure contains names of workload interfaces that need to have // their configuration (sysctls etc.) refreshed. @@ -177,7 +178,7 @@ type endpointManager struct { // from the dataplane). hostIfaceToAddrs map[string]set.Set[string] // rawHostEndpoints contains the raw (i.e. not resolved to interface) host endpoints. - rawHostEndpoints map[proto.HostEndpointID]*proto.HostEndpoint + rawHostEndpoints map[types.HostEndpointID]*proto.HostEndpoint // hostEndpointsDirty is set to true when host endpoints are updated. hostEndpointsDirty bool // activeHostIfaceToChains maps host interface name to the chains that we've programmed. @@ -190,10 +191,10 @@ type endpointManager struct { activeHostFilterDispatchChains map[string]*iptables.Chain activeHostMangleDispatchChains map[string]*iptables.Chain // activeHostEpIDToIfaceNames records which interfaces we resolved each host endpoint to. - activeHostEpIDToIfaceNames map[proto.HostEndpointID][]string + activeHostEpIDToIfaceNames map[types.HostEndpointID][]string // activeIfaceNameToHostEpID records which endpoint we resolved each host interface to. - activeIfaceNameToHostEpID map[string]proto.HostEndpointID - newIfaceNameToHostEpID map[string]proto.HostEndpointID + activeIfaceNameToHostEpID map[string]types.HostEndpointID + newIfaceNameToHostEpID map[string]types.HostEndpointID needToCheckDispatchChains bool needToCheckEndpointMarkChains bool @@ -288,21 +289,21 @@ func newEndpointManagerWithShims( // Pending updates, we store these up as OnUpdate is called, then process them // in CompleteDeferredWork and transfer the important data to the activeXYX fields. 
- pendingWlEpUpdates: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + pendingWlEpUpdates: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, pendingIfaceUpdates: map[string]ifacemonitor.State{}, - dirtyPolicyIDs: set.New[proto.PolicyID](), + dirtyPolicyIDs: set.New[types.PolicyID](), activeUpIfaces: set.New[string](), - activeWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, - activeWlIfaceNameToID: map[string]proto.WorkloadEndpointID{}, - activeWlIDToChains: map[proto.WorkloadEndpointID][]*iptables.Chain{}, + activeWlEndpoints: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + activeWlIfaceNameToID: map[string]types.WorkloadEndpointID{}, + activeWlIDToChains: map[types.WorkloadEndpointID][]*iptables.Chain{}, ifaceNameToPolicyGroupChainNames: map[string][]string{}, - activePolicySelectors: map[proto.PolicyID]string{}, + activePolicySelectors: map[types.PolicyID]string{}, policyChainRefCounts: map[string]int{}, - shadowedWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + shadowedWlEndpoints: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, wlIfaceNamesToReconfigure: set.New[string](), @@ -313,7 +314,7 @@ func newEndpointManagerWithShims( defaultRPFilter: defaultRPFilter, hostIfaceToAddrs: map[string]set.Set[string]{}, - rawHostEndpoints: map[proto.HostEndpointID]*proto.HostEndpoint{}, + rawHostEndpoints: map[types.HostEndpointID]*proto.HostEndpoint{}, hostEndpointsDirty: true, activeHostIfaceToRawChains: map[string][]*iptables.Chain{}, @@ -340,19 +341,23 @@ func (m *endpointManager) OnUpdate(protoBufMsg interface{}) { log.WithField("msg", protoBufMsg).Debug("Received message") switch msg := protoBufMsg.(type) { case *proto.WorkloadEndpointUpdate: - m.pendingWlEpUpdates[*msg.Id] = msg.Endpoint + id := types.ProtoToWorkloadEndpointID(msg.GetId()) + m.pendingWlEpUpdates[id] = msg.Endpoint case *proto.WorkloadEndpointRemove: - m.pendingWlEpUpdates[*msg.Id] = nil + id := types.ProtoToWorkloadEndpointID(msg.GetId()) + m.pendingWlEpUpdates[id] = nil case *proto.HostEndpointUpdate: log.WithField("msg", msg).Debug("Host endpoint update") - m.callbacks.InvokeUpdateHostEndpoint(*msg.Id) - m.rawHostEndpoints[*msg.Id] = msg.Endpoint + id := types.ProtoToHostEndpointID(msg.GetId()) + m.callbacks.InvokeUpdateHostEndpoint(id) + m.rawHostEndpoints[id] = msg.Endpoint m.hostEndpointsDirty = true m.epIDsToUpdateStatus.Add(*msg.Id) case *proto.HostEndpointRemove: log.WithField("msg", msg).Debug("Host endpoint removed") - m.callbacks.InvokeRemoveHostEndpoint(*msg.Id) - delete(m.rawHostEndpoints, *msg.Id) + id := types.ProtoToHostEndpointID(msg.GetId()) + m.callbacks.InvokeRemoveHostEndpoint(id) + delete(m.rawHostEndpoints, id) m.hostEndpointsDirty = true m.epIDsToUpdateStatus.Add(*msg.Id) case *ifaceStateUpdate: @@ -372,7 +377,8 @@ func (m *endpointManager) OnUpdate(protoBufMsg interface{}) { m.hostEndpointsDirty = true case *proto.ActivePolicyUpdate: newSel := msg.Policy.OriginalSelector - if oldSel, ok := m.activePolicySelectors[*msg.Id]; ok && oldSel == newSel { + id := types.ProtoToPolicyID(msg.GetId()) + if oldSel, ok := m.activePolicySelectors[id]; ok && oldSel == newSel { // No change that we care about. return } else if ok { @@ -381,18 +387,19 @@ func (m *endpointManager) OnUpdate(protoBufMsg interface{}) { // need to do that for new policies because the calc graph guarantees // that we'll see an endpoint update after any new policies are // added to an endpoint. 
- m.dirtyPolicyIDs.Add(*msg.Id) + m.dirtyPolicyIDs.Add(id) } log.WithFields(log.Fields{ "id": *msg.Id, "selector": newSel, }).Debug("Active policy selector new/updated.") - m.activePolicySelectors[*msg.Id] = newSel + m.activePolicySelectors[id] = newSel case *proto.ActivePolicyRemove: // We can only get a remove after no endpoints are using this policy // so we no longer need to track it at all. - m.dirtyPolicyIDs.Discard(*msg.Id) - delete(m.activePolicySelectors, *msg.Id) + id := types.ProtoToPolicyID(msg.GetId()) + m.dirtyPolicyIDs.Discard(id) + delete(m.activePolicySelectors, id) } } @@ -467,7 +474,7 @@ wepLoop: for _, t := range wep.Tiers { for _, pols := range [][]string{t.IngressPolicies, t.EgressPolicies} { for _, p := range pols { - polID := proto.PolicyID{ + polID := types.PolicyID{ Tier: t.Name, Name: p, } @@ -504,7 +511,7 @@ func (m *endpointManager) tiersUseDirtyPolicy(tiers []*proto.TierInfo) bool { for _, t := range tiers { for _, pols := range [][]string{t.IngressPolicies, t.EgressPolicies} { for _, p := range pols { - polID := proto.PolicyID{ + polID := types.PolicyID{ Tier: t.Name, Name: p, } @@ -541,10 +548,10 @@ func (m *endpointManager) updateEndpointStatuses() { log.WithField("dirtyEndpoints", m.epIDsToUpdateStatus).Debug("Reporting endpoint status.") m.epIDsToUpdateStatus.Iter(func(item interface{}) error { switch id := item.(type) { - case proto.WorkloadEndpointID: + case types.WorkloadEndpointID: status := m.calculateWorkloadEndpointStatus(id) m.OnEndpointStatusUpdate(m.ipVersion, id, status) - case proto.HostEndpointID: + case types.HostEndpointID: status := m.calculateHostEndpointStatus(id) m.OnEndpointStatusUpdate(m.ipVersion, id, status) } @@ -553,7 +560,7 @@ func (m *endpointManager) updateEndpointStatuses() { }) } -func (m *endpointManager) calculateWorkloadEndpointStatus(id proto.WorkloadEndpointID) string { +func (m *endpointManager) calculateWorkloadEndpointStatus(id types.WorkloadEndpointID) string { logCxt := log.WithField("workloadEndpointID", id) logCxt.Debug("Re-evaluating workload endpoint status") var operUp, adminUp, failed bool @@ -587,7 +594,7 @@ func (m *endpointManager) calculateWorkloadEndpointStatus(id proto.WorkloadEndpo return status } -func (m *endpointManager) calculateHostEndpointStatus(id proto.HostEndpointID) (status string) { +func (m *endpointManager) calculateHostEndpointStatus(id types.HostEndpointID) (status string) { logCxt := log.WithField("hostEndpointID", id) logCxt.Debug("Re-evaluating host endpoint status") var resolved, operUp bool @@ -643,7 +650,7 @@ func (m *endpointManager) resolveWorkloadEndpoints() { m.needToCheckDispatchChains = true } - removeActiveWorkload := func(logCxt *log.Entry, oldWorkload *proto.WorkloadEndpoint, id proto.WorkloadEndpointID) { + removeActiveWorkload := func(logCxt *log.Entry, oldWorkload *proto.WorkloadEndpoint, id types.WorkloadEndpointID) { m.callbacks.InvokeRemoveWorkload(oldWorkload) m.filterTable.RemoveChains(m.activeWlIDToChains[id]) delete(m.activeWlIDToChains, id) @@ -797,7 +804,7 @@ func (m *endpointManager) resolveWorkloadEndpoints() { if oldWorkload != nil { // Check for another endpoint with the same interface name, // that should now become active. 
- bestShadowedId := proto.WorkloadEndpointID{} + bestShadowedId := types.WorkloadEndpointID{} for sId, sWorkload := range m.shadowedWlEndpoints { logCxt.Infof("Old workload %v", oldWorkload) logCxt.Infof("Shadowed workload %v", sWorkload) @@ -845,7 +852,7 @@ func (m *endpointManager) resolveWorkloadEndpoints() { } func (m *endpointManager) updateWorkloadEndpointChains( - id proto.WorkloadEndpointID, + id types.WorkloadEndpointID, workload *proto.WorkloadEndpoint, ingressPolicyNames []string, egressPolicyNames []string, @@ -867,7 +874,7 @@ func (m *endpointManager) updateWorkloadEndpointChains( m.activeWlIDToChains[id] = chains } -func wlIdsAscending(id1, id2 *proto.WorkloadEndpointID) bool { +func wlIdsAscending(id1, id2 *types.WorkloadEndpointID) bool { if id1.OrchestratorId == id2.OrchestratorId { // Need to compare WorkloadId. if id1.WorkloadId == id2.WorkloadId { @@ -911,7 +918,7 @@ func (m *endpointManager) resolveEndpointMarks() { m.updateDispatchChains(m.activeEPMarkDispatchChains, newEndpointMarkDispatchChains, m.filterTable) } -func (m *endpointManager) resolveHostEndpoints() map[string]proto.HostEndpointID { +func (m *endpointManager) resolveHostEndpoints() map[string]types.HostEndpointID { // Host endpoint resolution // ------------------------ // @@ -938,13 +945,13 @@ func (m *endpointManager) resolveHostEndpoints() map[string]proto.HostEndpointID // own. Rather it is looking at the set of local non-workload interfaces and // seeing which of them are matched by the current set of HostEndpoints as a // whole. - newIfaceNameToHostEpID := map[string]proto.HostEndpointID{} + newIfaceNameToHostEpID := map[string]types.HostEndpointID{} for ifaceName, ifaceAddrs := range m.hostIfaceToAddrs { ifaceCxt := log.WithFields(log.Fields{ "ifaceName": ifaceName, "ifaceAddrs": ifaceAddrs, }) - bestHostEpId := proto.HostEndpointID{} + bestHostEpId := types.HostEndpointID{} HostEpLoop: for id, hostEp := range m.rawHostEndpoints { logCxt := ifaceCxt.WithField("id", id) @@ -996,7 +1003,7 @@ func (m *endpointManager) resolveHostEndpoints() map[string]proto.HostEndpointID } // Similar loop to find the best all-interfaces host endpoint. - bestHostEpId := proto.HostEndpointID{} + bestHostEpId := types.HostEndpointID{} for id, hostEp := range m.rawHostEndpoints { logCxt := log.WithField("id", id) if !forAllInterfaces(hostEp) { @@ -1037,9 +1044,9 @@ func (m *endpointManager) updateHostEndpoints() { // Calculate filtered name/id maps for untracked and pre-DNAT policy, and a reverse map from // each active host endpoint to the interfaces it is in use for. 
newIfaceNameToHostEpID := m.newIfaceNameToHostEpID - newPreDNATIfaceNameToHostEpID := map[string]proto.HostEndpointID{} - newUntrackedIfaceNameToHostEpID := map[string]proto.HostEndpointID{} - newHostEpIDToIfaceNames := map[proto.HostEndpointID][]string{} + newPreDNATIfaceNameToHostEpID := map[string]types.HostEndpointID{} + newUntrackedIfaceNameToHostEpID := map[string]types.HostEndpointID{} + newHostEpIDToIfaceNames := map[types.HostEndpointID][]string{} for ifaceName, id := range newIfaceNameToHostEpID { logCxt := log.WithField("id", id).WithField("ifaceName", ifaceName) ep := m.rawHostEndpoints[id] @@ -1480,7 +1487,7 @@ func forAllInterfaces(hep *proto.HostEndpoint) bool { } // for implementing the endpointsSource interface -func (m *endpointManager) GetRawHostEndpoints() map[proto.HostEndpointID]*proto.HostEndpoint { +func (m *endpointManager) GetRawHostEndpoints() map[types.HostEndpointID]*proto.HostEndpoint { return m.rawHostEndpoints } @@ -1492,14 +1499,14 @@ func (m *endpointManager) groupPolicies(tierName string, names []string, directi Tier: tierName, Direction: direction, PolicyNames: []string{names[0]}, - Selector: m.activePolicySelectors[proto.PolicyID{ + Selector: m.activePolicySelectors[types.PolicyID{ Tier: tierName, Name: names[0], }], } groups := []*rules.PolicyGroup{group} for _, name := range names[1:] { - sel := m.activePolicySelectors[proto.PolicyID{ + sel := m.activePolicySelectors[types.PolicyID{ Tier: tierName, Name: name, }] diff --git a/felix/dataplane/linux/endpoint_mgr_test.go b/felix/dataplane/linux/endpoint_mgr_test.go index c52e5c08bad..b1b75cda350 100644 --- a/felix/dataplane/linux/endpoint_mgr_test.go +++ b/felix/dataplane/linux/endpoint_mgr_test.go @@ -35,6 +35,7 @@ import ( "github.com/projectcalico/calico/felix/routetable" "github.com/projectcalico/calico/felix/rules" "github.com/projectcalico/calico/felix/testutils" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -877,7 +878,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should report id1 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", })) }) @@ -901,7 +902,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0_polA")) It("should report id1 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", })) }) @@ -920,8 +921,8 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0_polA")) It("should report id1 up, but id2 now in error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", - proto.HostEndpointID{EndpointId: "id2"}: "error", + types.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id2"}: "error", })) }) @@ -936,7 +937,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0_polB")) It("should report id2 up only", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id2"}: "up", + types.HostEndpointID{EndpointId: "id2"}: "up", })) }) @@ -966,8 +967,8 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", 
expectChainsFor("eth0_polB")) It("should report id0 up, but id1 now in error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id0"}: "up", - proto.HostEndpointID{EndpointId: "id1"}: "error", + types.HostEndpointID{EndpointId: "id0"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "error", })) }) @@ -982,7 +983,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0_polB")) It("should report id0 up only", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id0"}: "up", + types.HostEndpointID{EndpointId: "id0"}: "up", })) }) @@ -1235,7 +1236,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id1 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", })) }) @@ -1255,7 +1256,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id1 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", })) }) @@ -1267,8 +1268,8 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0", "eth1")) It("should report id1 and id22 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", - proto.HostEndpointID{EndpointId: "id22"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id22"}: "up", })) }) }) @@ -1285,8 +1286,8 @@ func endpointManagerTests(ipVersion uint8) func() { // unused, and so reported as in error. 
It("should report id1 error and id0 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "error", - proto.HostEndpointID{EndpointId: "id0"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "error", + types.HostEndpointID{EndpointId: "id0"}: "up", })) }) }) @@ -1299,8 +1300,8 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0", "eth1")) It("should report id1 and id22 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id1"}: "up", - proto.HostEndpointID{EndpointId: "id22"}: "up", + types.HostEndpointID{EndpointId: "id1"}: "up", + types.HostEndpointID{EndpointId: "id22"}: "up", })) }) }) @@ -1315,7 +1316,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report endpoint in error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "error", + types.HostEndpointID{EndpointId: "id3"}: "error", })) }) }) @@ -1328,7 +1329,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id4 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id4"}: "up", + types.HostEndpointID{EndpointId: "id4"}: "up", })) }) }) @@ -1341,7 +1342,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id5 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id5"}: "up", + types.HostEndpointID{EndpointId: "id5"}: "up", })) }) }) @@ -1355,7 +1356,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id3 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "up", + types.HostEndpointID{EndpointId: "id3"}: "up", })) }) }) @@ -1369,7 +1370,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id3 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "up", + types.HostEndpointID{EndpointId: "id3"}: "up", })) }) }) @@ -1383,7 +1384,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report id3 error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "error", + types.HostEndpointID{EndpointId: "id3"}: "error", })) }) }) @@ -1397,7 +1398,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report id3 error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "error", + types.HostEndpointID{EndpointId: "id3"}: "error", })) }) }) @@ -1410,7 +1411,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report id4 error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id4"}: 
"error", + types.HostEndpointID{EndpointId: "id4"}: "error", })) }) }) @@ -1423,7 +1424,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report id5 error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id5"}: "error", + types.HostEndpointID{EndpointId: "id5"}: "error", })) }) }) @@ -1437,7 +1438,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have empty dispatch chains", expectEmptyChains()) It("should report id3 error", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "error", + types.HostEndpointID{EndpointId: "id3"}: "error", })) }) @@ -1456,7 +1457,7 @@ func endpointManagerTests(ipVersion uint8) func() { It("should have expected chains", expectChainsFor("eth0")) It("should report id3 up", func() { Expect(statusReportRec.currentState).To(Equal(map[interface{}]string{ - proto.HostEndpointID{EndpointId: "id3"}: "up", + types.HostEndpointID{EndpointId: "id3"}: "up", })) }) }) diff --git a/felix/dataplane/linux/floating_ip_mgr.go b/felix/dataplane/linux/floating_ip_mgr.go index 5666b0c8c8b..276e2a7d086 100644 --- a/felix/dataplane/linux/floating_ip_mgr.go +++ b/felix/dataplane/linux/floating_ip_mgr.go @@ -24,6 +24,7 @@ import ( "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" ) // A floating IP is an IP that can be used to reach a particular workload endpoint, but that the @@ -76,7 +77,7 @@ type floatingIPManager struct { // Internal state. activeDNATChains []*iptables.Chain activeSNATChains []*iptables.Chain - natInfo map[proto.WorkloadEndpointID][]*proto.NatInfo + natInfo map[types.WorkloadEndpointID][]*proto.NatInfo dirtyNATInfo bool enabled bool } @@ -94,7 +95,7 @@ func newFloatingIPManager( activeDNATChains: []*iptables.Chain{}, activeSNATChains: []*iptables.Chain{}, - natInfo: map[proto.WorkloadEndpointID][]*proto.NatInfo{}, + natInfo: map[types.WorkloadEndpointID][]*proto.NatInfo{}, dirtyNATInfo: true, enabled: enabled, } @@ -105,18 +106,20 @@ func (m *floatingIPManager) OnUpdate(protoBufMsg interface{}) { case *proto.WorkloadEndpointUpdate: // We only program NAT mappings if the FloatingIPs feature is globally enabled, or // if the requested mapping comes from OpenStack. 
+ id := types.ProtoToWorkloadEndpointID(msg.GetId()) if m.enabled || msg.Id.OrchestratorId == apiv3.OrchestratorOpenStack { if m.ipVersion == 4 { - m.natInfo[*msg.Id] = msg.Endpoint.Ipv4Nat + m.natInfo[id] = msg.Endpoint.Ipv4Nat } else { - m.natInfo[*msg.Id] = msg.Endpoint.Ipv6Nat + m.natInfo[id] = msg.Endpoint.Ipv6Nat } } else { - delete(m.natInfo, *msg.Id) + delete(m.natInfo, id) } m.dirtyNATInfo = true case *proto.WorkloadEndpointRemove: - delete(m.natInfo, *msg.Id) + id := types.ProtoToWorkloadEndpointID(msg.GetId()) + delete(m.natInfo, id) m.dirtyNATInfo = true } } diff --git a/felix/dataplane/linux/policy_mgr.go b/felix/dataplane/linux/policy_mgr.go index 4c7a02f2599..5386d51c36b 100644 --- a/felix/dataplane/linux/policy_mgr.go +++ b/felix/dataplane/linux/policy_mgr.go @@ -24,6 +24,7 @@ import ( "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" ) // policyManager simply renders policy/profile updates into iptables.Chain objects and sends @@ -36,13 +37,13 @@ type policyManager struct { ipVersion uint8 rawEgressOnly bool ipSetFilterDirty bool // Only used in "raw only" mode. - neededIPSets map[proto.PolicyID]set.Set[string] + neededIPSets map[types.PolicyID]set.Set[string] ipSetsCallback func(neededIPSets set.Set[string]) } type policyRenderer interface { - PolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain - ProfileToIptablesChains(profileID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) + PolicyToIptablesChains(policyID *types.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain + ProfileToIptablesChains(profileID *types.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) } func newPolicyManager(rawTable, mangleTable, filterTable IptablesTable, ruleRenderer policyRenderer, ipVersion uint8) *policyManager { @@ -66,7 +67,7 @@ func newRawEgressPolicyManager(rawTable IptablesTable, ruleRenderer policyRender rawEgressOnly: true, // Make sure we set the filter at start-of-day, even if there are no policies. ipSetFilterDirty: true, - neededIPSets: make(map[proto.PolicyID]set.Set[string]), + neededIPSets: make(map[types.PolicyID]set.Set[string]), ipSetsCallback: ipSetsCallback, } } @@ -74,13 +75,14 @@ func newRawEgressPolicyManager(rawTable IptablesTable, ruleRenderer policyRender func (m *policyManager) OnUpdate(msg interface{}) { switch msg := msg.(type) { case *proto.ActivePolicyUpdate: + id := types.ProtoToPolicyID(msg.GetId()) if m.rawEgressOnly && !msg.Policy.Untracked { log.WithField("id", msg.Id).Debug("Clean up non-untracked policy.") - m.cleanUpPolicy(msg.Id) + m.cleanUpPolicy(&id) return } log.WithField("id", msg.Id).Debug("Updating policy chains") - chains := m.ruleRenderer.PolicyToIptablesChains(msg.Id, msg.Policy, m.ipVersion) + chains := m.ruleRenderer.PolicyToIptablesChains(&id, msg.Policy, m.ipVersion) if m.rawEgressOnly { neededIPSets := set.New[string]() filteredChains := []*iptables.Chain(nil) @@ -91,7 +93,7 @@ func (m *policyManager) OnUpdate(msg interface{}) { } } chains = filteredChains - m.updateNeededIPSets(msg.Id, neededIPSets) + m.updateNeededIPSets(&id, neededIPSets) } // We can't easily tell whether the policy is in use in a particular table, and, if the policy // type gets changed it may move between tables. Hence, we put the policy into all tables. 
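The hunk above shows the pattern used throughout this change: the *proto.PolicyID carried by the message is converted once, up front, into a comparable types.PolicyID value, and that value (or its address) is what flows into the renderer and the neededIPSets map. A minimal sketch of the assumed helper follows; the field names are taken from the types.PolicyID literals elsewhere in this diff, but the body is illustrative rather than the actual felix/types implementation.

// Sketch only: assumed shape of the felix/types conversion helper.
package types

import "github.com/projectcalico/calico/felix/proto"

// PolicyID is a plain struct with only comparable fields, so it can be used
// directly as a map key or set member, unlike the pointer-based proto message.
type PolicyID struct {
	Tier string
	Name string
}

// ProtoToPolicyID copies the fields out of the proto message (via its
// generated getters) into the comparable value type.
func ProtoToPolicyID(p *proto.PolicyID) PolicyID {
	return PolicyID{Tier: p.GetTier(), Name: p.GetName()}
}

With that in place, lookups such as m.neededIPSets[*id] key on the value itself rather than on a pointer, which is what makes the switch to types.* IDs safe for the maps and sets in this file.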
@@ -101,27 +103,30 @@ func (m *policyManager) OnUpdate(msg interface{}) { m.filterTable.UpdateChains(chains) case *proto.ActivePolicyRemove: log.WithField("id", msg.Id).Debug("Removing policy chains") - m.cleanUpPolicy(msg.Id) + id := types.ProtoToPolicyID(msg.GetId()) + m.cleanUpPolicy(&id) case *proto.ActiveProfileUpdate: + id := types.ProtoToProfileID(msg.GetId()) if m.rawEgressOnly { log.WithField("id", msg.Id).Debug("Ignore non-untracked profile") return } log.WithField("id", msg.Id).Debug("Updating profile chains") - inbound, outbound := m.ruleRenderer.ProfileToIptablesChains(msg.Id, msg.Profile, m.ipVersion) + inbound, outbound := m.ruleRenderer.ProfileToIptablesChains(&id, msg.Profile, m.ipVersion) m.filterTable.UpdateChains([]*iptables.Chain{inbound, outbound}) m.mangleTable.UpdateChains([]*iptables.Chain{outbound}) case *proto.ActiveProfileRemove: log.WithField("id", msg.Id).Debug("Removing profile chains") - inName := rules.ProfileChainName(rules.ProfileInboundPfx, msg.Id) - outName := rules.ProfileChainName(rules.ProfileOutboundPfx, msg.Id) + id := types.ProtoToProfileID(msg.GetId()) + inName := rules.ProfileChainName(rules.ProfileInboundPfx, &id) + outName := rules.ProfileChainName(rules.ProfileOutboundPfx, &id) m.filterTable.RemoveChainByName(inName) m.filterTable.RemoveChainByName(outName) m.mangleTable.RemoveChainByName(outName) } } -func (m *policyManager) cleanUpPolicy(id *proto.PolicyID) { +func (m *policyManager) cleanUpPolicy(id *types.PolicyID) { if m.rawEgressOnly { m.updateNeededIPSets(id, nil) } @@ -136,7 +141,7 @@ func (m *policyManager) cleanUpPolicy(id *proto.PolicyID) { m.rawTable.RemoveChainByName(outName) } -func (m *policyManager) updateNeededIPSets(id *proto.PolicyID, neededIPSets set.Set[string]) { +func (m *policyManager) updateNeededIPSets(id *types.PolicyID, neededIPSets set.Set[string]) { if neededIPSets != nil { m.neededIPSets[*id] = neededIPSets } else { diff --git a/felix/dataplane/linux/policy_mgr_test.go b/felix/dataplane/linux/policy_mgr_test.go index edaf52363a2..95bd1e6ddf1 100644 --- a/felix/dataplane/linux/policy_mgr_test.go +++ b/felix/dataplane/linux/policy_mgr_test.go @@ -24,6 +24,7 @@ import ( "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -399,7 +400,7 @@ func (m *ipSetsMatcher) NegatedFailureMessage(actual interface{}) (message strin type mockPolRenderer struct { } -func (r *mockPolRenderer) PolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain { +func (r *mockPolRenderer) PolicyToIptablesChains(policyID *types.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain { inName := rules.PolicyChainName(rules.PolicyInboundPfx, policyID) outName := rules.PolicyChainName(rules.PolicyOutboundPfx, policyID) return []*iptables.Chain{ @@ -407,7 +408,7 @@ func (r *mockPolRenderer) PolicyToIptablesChains(policyID *proto.PolicyID, polic {Name: outName}, } } -func (r *mockPolRenderer) ProfileToIptablesChains(profID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) { +func (r *mockPolRenderer) ProfileToIptablesChains(profID *types.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) { inbound = &iptables.Chain{ Name: rules.ProfileChainName(rules.ProfileInboundPfx, profID), } diff --git 
a/felix/dataplane/linux/status_combiner.go b/felix/dataplane/linux/status_combiner.go index b3fafb1cd68..e343d754cc3 100644 --- a/felix/dataplane/linux/status_combiner.go +++ b/felix/dataplane/linux/status_combiner.go @@ -18,6 +18,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -48,7 +49,7 @@ func newEndpointStatusCombiner(fromDataplane chan interface{}, ipv6Enabled bool) func (e *endpointStatusCombiner) OnEndpointStatusUpdate( ipVersion uint8, - id interface{}, // proto.HostEndpointID or proto.WorkloadEndpointID + id interface{}, // types.HostEndpointID or types.WorkloadEndpointID status string, ) { log.WithFields(log.Fields{ @@ -85,28 +86,32 @@ func (e *endpointStatusCombiner) Apply() { if statusToReport == "" { logCxt.Info("Reporting endpoint removed.") switch id := id.(type) { - case proto.WorkloadEndpointID: + case types.WorkloadEndpointID: + pid := types.WorkloadEndpointIDToProto(id) e.fromDataplane <- &proto.WorkloadEndpointStatusRemove{ - Id: &id, + Id: pid, } - case proto.HostEndpointID: + case types.HostEndpointID: + pid := types.HostEndpointIDToProto(id) e.fromDataplane <- &proto.HostEndpointStatusRemove{ - Id: &id, + Id: pid, } } } else { logCxt.WithField("status", statusToReport).Info("Reporting combined status.") switch id := id.(type) { - case proto.WorkloadEndpointID: + case types.WorkloadEndpointID: + pid := types.WorkloadEndpointIDToProto(id) e.fromDataplane <- &proto.WorkloadEndpointStatusUpdate{ - Id: &id, + Id: pid, Status: &proto.EndpointStatus{ Status: statusToReport, }, } - case proto.HostEndpointID: + case types.HostEndpointID: + pid := types.HostEndpointIDToProto(id) e.fromDataplane <- &proto.HostEndpointStatusUpdate{ - Id: &id, + Id: pid, Status: &proto.EndpointStatus{ Status: statusToReport, }, diff --git a/felix/dataplane/linux/xdp_state.go b/felix/dataplane/linux/xdp_state.go index 5db8fc2113d..458da3729d8 100644 --- a/felix/dataplane/linux/xdp_state.go +++ b/felix/dataplane/linux/xdp_state.go @@ -26,6 +26,7 @@ import ( "github.com/projectcalico/calico/felix/dataplane/common" "github.com/projectcalico/calico/felix/ipsets" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -144,11 +145,13 @@ func (x *xdpState) OnUpdate(protoBufMsg interface{}) { log.WithField("ipSetId", msg.Id).Debug("IP set remove") x.ipV4State.removeIPSet(msg.Id) case *proto.ActivePolicyUpdate: + id := types.ProtoToPolicyID(msg.GetId()) log.WithField("id", msg.Id).Debug("Updating policy chains") - x.ipV4State.updatePolicy(*msg.Id, msg.Policy) + x.ipV4State.updatePolicy(id, msg.Policy) case *proto.ActivePolicyRemove: + id := types.ProtoToPolicyID(msg.GetId()) log.WithField("id", msg.Id).Debug("Removing policy chains") - x.ipV4State.removePolicy(*msg.Id) + x.ipV4State.removePolicy(id) } } @@ -1099,7 +1102,7 @@ func (s *xdpIPState) processPendingDiffState(epSource endpointsSource) { // CHANGES IN HOST ENDPOINTS // Host Endpoints that were updated - pds.UpdatedHostEndpoints.Iter(func(hepID proto.HostEndpointID) error { + pds.UpdatedHostEndpoints.Iter(func(hepID types.HostEndpointID) error { s.logCxt.WithField("hostEpId", hepID.String()).Debug("Host endpoint has changed.") for ifaceName, data := range cs.IfaceNameToData { if processedIfaces.Contains(ifaceName) { @@ -1123,7 +1126,7 @@ func (s *xdpIPState) processPendingDiffState(epSource 
endpointsSource) { }) // Host Endpoints that were removed - pds.RemovedHostEndpoints.Iter(func(hepID proto.HostEndpointID) error { + pds.RemovedHostEndpoints.Iter(func(hepID types.HostEndpointID) error { // XXX do nothing return nil }) @@ -1131,7 +1134,7 @@ func (s *xdpIPState) processPendingDiffState(epSource endpointsSource) { // CHANGES IN POLICIES // Policies that should be removed - pds.PoliciesToRemove.Iter(func(policyID proto.PolicyID) error { + pds.PoliciesToRemove.Iter(func(policyID types.PolicyID) error { delete(newCs.XDPEligiblePolicies, policyID) return nil }) @@ -1282,8 +1285,8 @@ func dumpSetToString(s set.Set[string]) string { return strings.Join(strs, ", ") } -func (s *xdpIPState) processHostEndpointChange(ifaceName string, oldData *xdpIfaceData, newHepID proto.HostEndpointID, newEP *proto.HostEndpoint, changeInMaps map[string]map[string]int) { - policiesToSetIDs := make(map[proto.PolicyID]set.Set[string] /**/) +func (s *xdpIPState) processHostEndpointChange(ifaceName string, oldData *xdpIfaceData, newHepID types.HostEndpointID, newEP *proto.HostEndpoint, changeInMaps map[string]map[string]int) { + policiesToSetIDs := make(map[types.PolicyID]set.Set[string] /**/) oldSetIDs := make(map[string]int) for _, setIDs := range oldData.PoliciesToSetIDs { setIDs.Iter(func(setID string) error { @@ -1353,12 +1356,12 @@ func (s *xdpIPState) processHostEndpointChange(ifaceName string, oldData *xdpIfa } } -func getPolicyIDs(hep *proto.HostEndpoint) []proto.PolicyID { - var policyIDs []proto.PolicyID +func getPolicyIDs(hep *proto.HostEndpoint) []types.PolicyID { + var policyIDs []types.PolicyID // we handle Untracked policy only for _, tier := range hep.GetUntrackedTiers() { for _, policyName := range tier.IngressPolicies { - policyID := proto.PolicyID{ + policyID := types.PolicyID{ Tier: tier.Name, Name: policyName, } @@ -1386,7 +1389,7 @@ func getSetIDs(rules *xdpRules) set.Set[string] /**/ { return setIDs } -func (s *xdpIPState) getLatestRulesForPolicyID(policyID proto.PolicyID) *xdpRules { +func (s *xdpIPState) getLatestRulesForPolicyID(policyID types.PolicyID) *xdpRules { logCxt := s.logCxt.WithField("policyID", policyID.String()) rules, ok := s.pendingDiffState.PoliciesToUpdate[policyID] if ok { @@ -1407,7 +1410,7 @@ func (s *xdpIPState) getLatestRulesForPolicyID(policyID proto.PolicyID) *xdpRule } } -func (s *xdpIPState) updatePolicy(policyID proto.PolicyID, policy *proto.Policy) { +func (s *xdpIPState) updatePolicy(policyID types.PolicyID, policy *proto.Policy) { s.logCxt.WithFields(log.Fields{ "policyID": policyID, "policy": policy, @@ -1422,7 +1425,7 @@ func (s *xdpIPState) updatePolicy(policyID proto.PolicyID, policy *proto.Policy) } } -func (s *xdpIPState) removePolicy(policyID proto.PolicyID) { +func (s *xdpIPState) removePolicy(policyID types.PolicyID) { s.logCxt.WithField("policyID", policyID).Debug("removePolicy callback called.") delete(s.pendingDiffState.PoliciesToUpdate, policyID) s.pendingDiffState.PoliciesToRemove.Add(policyID) @@ -1558,7 +1561,7 @@ func (s *xdpIPState) isSetIDInCurrentState(setID string) bool { return false } -func (s *xdpIPState) addInterface(ifaceName string, hostEPID proto.HostEndpointID) { +func (s *xdpIPState) addInterface(ifaceName string, hostEPID types.HostEndpointID) { s.logCxt.WithFields(log.Fields{ "ifaceName": ifaceName, "hostEPID": hostEPID, @@ -1573,7 +1576,7 @@ func (s *xdpIPState) removeInterface(ifaceName string) { s.pendingDiffState.IfaceNamesToDrop.Add(ifaceName) } -func (s *xdpIPState) updateInterface(ifaceName string, newHostEPID 
proto.HostEndpointID) { +func (s *xdpIPState) updateInterface(ifaceName string, newHostEPID types.HostEndpointID) { s.logCxt.WithFields(log.Fields{ "ifaceName": ifaceName, "newHostEPID": newHostEPID, @@ -1582,7 +1585,7 @@ func (s *xdpIPState) updateInterface(ifaceName string, newHostEPID proto.HostEnd s.pendingDiffState.IfaceEpIDChange[ifaceName] = newHostEPID } -func (s *xdpIPState) updateHostEndpoint(hostEPID proto.HostEndpointID) { +func (s *xdpIPState) updateHostEndpoint(hostEPID types.HostEndpointID) { s.logCxt.WithField("hostEPID", hostEPID).Debug("updateHostEndpoint callback called.") s.pendingDiffState.RemovedHostEndpoints.Discard(hostEPID) @@ -1661,7 +1664,7 @@ func (s *xdpIPState) getAffectedIfaces(setID string) map[string]uint32 { return ifacesToRefCounts } -func (s *xdpIPState) isHostEndpointIDInCurrentState(hep proto.HostEndpointID) bool { +func (s *xdpIPState) isHostEndpointIDInCurrentState(hep types.HostEndpointID) bool { for _, data := range s.currentState.IfaceNameToData { if data.EpID == hep { return true @@ -1670,7 +1673,7 @@ func (s *xdpIPState) isHostEndpointIDInCurrentState(hep proto.HostEndpointID) bo return false } -func (s *xdpIPState) removeHostEndpoint(hostEPID proto.HostEndpointID) { +func (s *xdpIPState) removeHostEndpoint(hostEPID types.HostEndpointID) { s.logCxt.WithField("hostEPID", hostEPID).Debug("removeHostEndpoint callback called.") s.pendingDiffState.RemovedHostEndpoints.Add(hostEPID) @@ -1688,20 +1691,20 @@ type xdpSystemState struct { IfaceNameToData map[string]xdpIfaceData // a cache of all the policies that could be implemented with // XDP, even those that currently are not - XDPEligiblePolicies map[proto.PolicyID]xdpRules + XDPEligiblePolicies map[types.PolicyID]xdpRules } func newXDPSystemState() *xdpSystemState { return &xdpSystemState{ IfaceNameToData: make(map[string]xdpIfaceData), - XDPEligiblePolicies: make(map[proto.PolicyID]xdpRules), + XDPEligiblePolicies: make(map[types.PolicyID]xdpRules), } } func (s *xdpSystemState) Copy() *xdpSystemState { newState := xdpSystemState{ IfaceNameToData: make(map[string]xdpIfaceData), - XDPEligiblePolicies: make(map[proto.PolicyID]xdpRules), + XDPEligiblePolicies: make(map[types.PolicyID]xdpRules), } for k, v := range s.IfaceNameToData { @@ -1716,24 +1719,24 @@ func (s *xdpSystemState) Copy() *xdpSystemState { } type xdpPendingDiffState struct { - NewIfaceNameToHostEpID map[string]proto.HostEndpointID + NewIfaceNameToHostEpID map[string]types.HostEndpointID IfaceNamesToDrop set.Set[string] // - IfaceEpIDChange map[string]proto.HostEndpointID - UpdatedHostEndpoints set.Set[proto.HostEndpointID] // - RemovedHostEndpoints set.Set[proto.HostEndpointID] // - PoliciesToRemove set.Set[proto.PolicyID] // - PoliciesToUpdate map[proto.PolicyID]*xdpRules + IfaceEpIDChange map[string]types.HostEndpointID + UpdatedHostEndpoints set.Set[types.HostEndpointID] // + RemovedHostEndpoints set.Set[types.HostEndpointID] // + PoliciesToRemove set.Set[types.PolicyID] // + PoliciesToUpdate map[types.PolicyID]*xdpRules } func newXDPPendingDiffState() *xdpPendingDiffState { return &xdpPendingDiffState{ - NewIfaceNameToHostEpID: make(map[string]proto.HostEndpointID), + NewIfaceNameToHostEpID: make(map[string]types.HostEndpointID), IfaceNamesToDrop: set.New[string](), - IfaceEpIDChange: make(map[string]proto.HostEndpointID), - UpdatedHostEndpoints: set.New[proto.HostEndpointID](), - RemovedHostEndpoints: set.New[proto.HostEndpointID](), - PoliciesToRemove: set.New[proto.PolicyID](), - PoliciesToUpdate: 
make(map[proto.PolicyID]*xdpRules), + IfaceEpIDChange: make(map[string]types.HostEndpointID), + UpdatedHostEndpoints: set.New[types.HostEndpointID](), + RemovedHostEndpoints: set.New[types.HostEndpointID](), + PoliciesToRemove: set.New[types.PolicyID](), + PoliciesToUpdate: make(map[types.PolicyID]*xdpRules), } } @@ -2111,13 +2114,13 @@ func processMemberDeletions(memberCache *xdpMemberCache, iface string, mi member } type xdpIfaceData struct { - EpID proto.HostEndpointID - PoliciesToSetIDs map[proto.PolicyID]set.Set[string] + EpID types.HostEndpointID + PoliciesToSetIDs map[types.PolicyID]set.Set[string] } func (data xdpIfaceData) Copy() xdpIfaceData { new := data - new.PoliciesToSetIDs = make(map[proto.PolicyID]set.Set[string], len(data.PoliciesToSetIDs)) + new.PoliciesToSetIDs = make(map[types.PolicyID]set.Set[string], len(data.PoliciesToSetIDs)) for k, v := range data.PoliciesToSetIDs { // this makes shallow copy, but fortunately these are // just strings @@ -2155,7 +2158,7 @@ type xdpRule struct { } type endpointsSource interface { - GetRawHostEndpoints() map[proto.HostEndpointID]*proto.HostEndpoint + GetRawHostEndpoints() map[types.HostEndpointID]*proto.HostEndpoint } var _ endpointsSource = &endpointManager{} diff --git a/felix/dataplane/linux/xdp_state_test.go b/felix/dataplane/linux/xdp_state_test.go index 5da3c5088b2..68b7e99e8df 100644 --- a/felix/dataplane/linux/xdp_state_test.go +++ b/felix/dataplane/linux/xdp_state_test.go @@ -26,6 +26,7 @@ import ( "github.com/projectcalico/calico/felix/bpf" "github.com/projectcalico/calico/felix/ipsets" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -47,10 +48,10 @@ func (s *mockIPSetsSource) GetIPSetMembers(setID string) (set.Set[string], error } type mockEndpointsSource struct { - rawHep map[proto.HostEndpointID]*proto.HostEndpoint + rawHep map[types.HostEndpointID]*proto.HostEndpoint } -func (s *mockEndpointsSource) GetRawHostEndpoints() map[proto.HostEndpointID]*proto.HostEndpoint { +func (s *mockEndpointsSource) GetRawHostEndpoints() map[types.HostEndpointID]*proto.HostEndpoint { return s.rawHep } @@ -141,7 +142,7 @@ type updatePolicyType struct { } func (up *updatePolicyType) Do(ipState *xdpIPState) { - policyID := proto.PolicyID{Tier: "default", Name: up.policyID} + policyID := types.PolicyID{Tier: "default", Name: up.policyID} policy := &proto.Policy{InboundRules: up.inRules} ipState.updatePolicy(policyID, policy) } @@ -151,7 +152,7 @@ type removePolicyType struct { } func (rp *removePolicyType) Do(ipState *xdpIPState) { - policyID := proto.PolicyID{Tier: "default", Name: rp.policyID} + policyID := types.PolicyID{Tier: "default", Name: rp.policyID} ipState.removePolicy(policyID) } @@ -196,7 +197,7 @@ type addInterfaceType struct { } func (ai *addInterfaceType) Do(ipState *xdpIPState) { - ipState.addInterface(ai.ifaceName, proto.HostEndpointID{EndpointId: ai.endpointID}) + ipState.addInterface(ai.ifaceName, types.HostEndpointID{EndpointId: ai.endpointID}) } type removeInterfaceType struct { @@ -213,7 +214,7 @@ type updateInterfaceType struct { } func (ui *updateInterfaceType) Do(ipState *xdpIPState) { - ipState.updateInterface(ui.ifaceName, proto.HostEndpointID{EndpointId: ui.endpointID}) + ipState.updateInterface(ui.ifaceName, types.HostEndpointID{EndpointId: ui.endpointID}) } type updateHostEndpointType struct { @@ -221,7 +222,7 @@ type updateHostEndpointType struct { } func (uh *updateHostEndpointType) Do(ipState *xdpIPState) 
{ - ipState.updateHostEndpoint(proto.HostEndpointID{EndpointId: uh.endpointID}) + ipState.updateHostEndpoint(types.HostEndpointID{EndpointId: uh.endpointID}) } type removeHostEndpointType struct { @@ -229,7 +230,7 @@ type removeHostEndpointType struct { } func (rh *removeHostEndpointType) Do(ipState *xdpIPState) { - ipState.removeHostEndpoint(proto.HostEndpointID{EndpointId: rh.endpointID}) + ipState.removeHostEndpoint(types.HostEndpointID{EndpointId: rh.endpointID}) } var _ testCBEvent = &updatePolicyType{} @@ -370,19 +371,19 @@ type testIfaceData struct { func testStateToRealState(testIfaces map[string]testIfaceData, testEligiblePolicies map[string][][]string, realState *xdpSystemState) { for ifaceName, ifaceData := range testIfaces { - policiesToSetIDs := make(map[proto.PolicyID]set.Set[string], len(ifaceData.policiesToSets)) + policiesToSetIDs := make(map[types.PolicyID]set.Set[string], len(ifaceData.policiesToSets)) for policyID, setIDs := range ifaceData.policiesToSets { - protoID := proto.PolicyID{Tier: "default", Name: policyID} + protoID := types.PolicyID{Tier: "default", Name: policyID} setIDsSet := set.FromArray(setIDs) policiesToSetIDs[protoID] = setIDsSet } realState.IfaceNameToData[ifaceName] = xdpIfaceData{ - EpID: proto.HostEndpointID{EndpointId: ifaceData.epID}, + EpID: types.HostEndpointID{EndpointId: ifaceData.epID}, PoliciesToSetIDs: policiesToSetIDs, } } for policyID, testRules := range testEligiblePolicies { - protoID := proto.PolicyID{Tier: "default", Name: policyID} + protoID := types.PolicyID{Tier: "default", Name: policyID} rules := make([]xdpRule, 0, len(testRules)) for _, setIDs := range testRules { rules = append(rules, xdpRule{ @@ -447,9 +448,9 @@ var _ = Describe("XDP state", func() { expectedNcs := newXDPSystemState() testStateToRealState(s.currentState, s.eligiblePolicies, cs) testStateToRealState(s.newCurrentState, s.newEligiblePolicies, expectedNcs) - rawHep := make(map[proto.HostEndpointID]*proto.HostEndpoint, len(s.endpoints)) + rawHep := make(map[types.HostEndpointID]*proto.HostEndpoint, len(s.endpoints)) for epID, policyIDs := range s.endpoints { - protoEpID := proto.HostEndpointID{ + protoEpID := types.HostEndpointID{ EndpointId: epID, } protoEndpoint := &proto.HostEndpoint{ @@ -2723,10 +2724,10 @@ var _ = Describe("XDP state", func() { for iface, needsXDP := range s.newState { data := xdpIfaceData{} if needsXDP { - policyID := proto.PolicyID{Tier: "default", Name: "bar"} - endpointID := proto.HostEndpointID{EndpointId: "foo"} + policyID := types.PolicyID{Tier: "default", Name: "bar"} + endpointID := types.HostEndpointID{EndpointId: "foo"} data.EpID = endpointID - data.PoliciesToSetIDs = map[proto.PolicyID]set.Set[string]{ + data.PoliciesToSetIDs = map[types.PolicyID]set.Set[string]{ policyID: set.From("ipset"), } } diff --git a/felix/dataplane/mock/mock_dataplane.go b/felix/dataplane/mock/mock_dataplane.go index 5bc4026d8b6..b0a70229b66 100644 --- a/felix/dataplane/mock/mock_dataplane.go +++ b/felix/dataplane/mock/mock_dataplane.go @@ -26,6 +26,7 @@ import ( "github.com/projectcalico/calico/felix/config" extdataplane "github.com/projectcalico/calico/felix/dataplane/external" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" ) @@ -34,10 +35,10 @@ type MockDataplane struct { inSync bool ipSets map[string]set.Set[string] - activePolicies map[proto.PolicyID]*proto.Policy - activeUntrackedPolicies set.Set[proto.PolicyID] - activePreDNATPolicies 
set.Set[proto.PolicyID] - activeProfiles set.Set[proto.ProfileID] + activePolicies map[types.PolicyID]*proto.Policy + activeUntrackedPolicies set.Set[types.PolicyID] + activePreDNATPolicies set.Set[types.PolicyID] + activeProfiles set.Set[types.ProfileID] activeVTEPs map[string]proto.VXLANTunnelEndpointUpdate activeWireguardEndpoints map[string]proto.WireguardEndpointUpdate activeWireguardV6Endpoints map[string]proto.WireguardEndpointV6Update @@ -46,10 +47,10 @@ type MockDataplane struct { endpointToPolicyOrder map[string][]TierInfo endpointToUntrackedPolicyOrder map[string][]TierInfo endpointToPreDNATPolicyOrder map[string][]TierInfo - endpointToAllPolicyIDs map[string][]proto.PolicyID + endpointToAllPolicyIDs map[string][]types.PolicyID endpointToProfiles map[string][]string - serviceAccounts map[proto.ServiceAccountID]*proto.ServiceAccountUpdate - namespaces map[proto.NamespaceID]*proto.NamespaceUpdate + serviceAccounts map[types.ServiceAccountID]*proto.ServiceAccountUpdate + namespaces map[types.NamespaceID]*proto.NamespaceUpdate config map[string]string numEvents int encapsulation proto.Encapsulation @@ -78,11 +79,11 @@ func (d *MockDataplane) IPSets() map[string]set.Set[string] { return copy } -func (d *MockDataplane) ActivePolicies() set.Set[proto.PolicyID] { +func (d *MockDataplane) ActivePolicies() set.Set[types.PolicyID] { d.Lock() defer d.Unlock() - policyIDs := set.New[proto.PolicyID]() + policyIDs := set.New[types.PolicyID]() for k := range d.activePolicies { policyIDs.Add(k) } @@ -90,26 +91,26 @@ func (d *MockDataplane) ActivePolicies() set.Set[proto.PolicyID] { return policyIDs } -func (d *MockDataplane) ActivePolicy(k proto.PolicyID) *proto.Policy { +func (d *MockDataplane) ActivePolicy(k types.PolicyID) *proto.Policy { d.Lock() defer d.Unlock() return d.activePolicies[k] } -func (d *MockDataplane) ActiveUntrackedPolicies() set.Set[proto.PolicyID] { +func (d *MockDataplane) ActiveUntrackedPolicies() set.Set[types.PolicyID] { d.Lock() defer d.Unlock() return d.activeUntrackedPolicies.Copy() } -func (d *MockDataplane) ActivePreDNATPolicies() set.Set[proto.PolicyID] { +func (d *MockDataplane) ActivePreDNATPolicies() set.Set[types.PolicyID] { d.Lock() defer d.Unlock() return d.activePreDNATPolicies.Copy() } -func (d *MockDataplane) ActiveProfiles() set.Set[proto.ProfileID] { +func (d *MockDataplane) ActiveProfiles() set.Set[types.ProfileID] { d.Lock() defer d.Unlock() @@ -196,22 +197,22 @@ func (d *MockDataplane) EndpointToPreDNATPolicyOrder() map[string][]TierInfo { return copyPolOrder(d.endpointToPreDNATPolicyOrder) } -func (d *MockDataplane) ServiceAccounts() map[proto.ServiceAccountID]*proto.ServiceAccountUpdate { +func (d *MockDataplane) ServiceAccounts() map[types.ServiceAccountID]*proto.ServiceAccountUpdate { d.Lock() defer d.Unlock() - cpy := make(map[proto.ServiceAccountID]*proto.ServiceAccountUpdate) + cpy := make(map[types.ServiceAccountID]*proto.ServiceAccountUpdate) for k, v := range d.serviceAccounts { cpy[k] = v } return cpy } -func (d *MockDataplane) Namespaces() map[proto.NamespaceID]*proto.NamespaceUpdate { +func (d *MockDataplane) Namespaces() map[types.NamespaceID]*proto.NamespaceUpdate { d.Lock() defer d.Unlock() - cpy := make(map[proto.NamespaceID]*proto.NamespaceUpdate) + cpy := make(map[types.NamespaceID]*proto.NamespaceUpdate) for k, v := range d.namespaces { cpy[k] = v } @@ -262,10 +263,10 @@ func (d *MockDataplane) Config() map[string]string { func NewMockDataplane() *MockDataplane { s := &MockDataplane{ ipSets: make(map[string]set.Set[string]), - 
activePolicies: map[proto.PolicyID]*proto.Policy{}, - activeProfiles: set.New[proto.ProfileID](), - activeUntrackedPolicies: set.New[proto.PolicyID](), - activePreDNATPolicies: set.New[proto.PolicyID](), + activePolicies: map[types.PolicyID]*proto.Policy{}, + activeProfiles: set.New[types.ProfileID](), + activeUntrackedPolicies: set.New[types.PolicyID](), + activePreDNATPolicies: set.New[types.PolicyID](), activeRoutes: set.New[proto.RouteUpdate](), activeVTEPs: make(map[string]proto.VXLANTunnelEndpointUpdate), activeWireguardEndpoints: make(map[string]proto.WireguardEndpointUpdate), @@ -275,9 +276,9 @@ func NewMockDataplane() *MockDataplane { endpointToUntrackedPolicyOrder: make(map[string][]TierInfo), endpointToPreDNATPolicyOrder: make(map[string][]TierInfo), endpointToProfiles: make(map[string][]string), - endpointToAllPolicyIDs: make(map[string][]proto.PolicyID), - serviceAccounts: make(map[proto.ServiceAccountID]*proto.ServiceAccountUpdate), - namespaces: make(map[proto.NamespaceID]*proto.NamespaceUpdate), + endpointToAllPolicyIDs: make(map[string][]types.PolicyID), + serviceAccounts: make(map[types.ServiceAccountID]*proto.ServiceAccountUpdate), + namespaces: make(map[types.NamespaceID]*proto.NamespaceUpdate), } return s } @@ -374,7 +375,7 @@ func (d *MockDataplane) OnEvent(event interface{}) { case *proto.WorkloadEndpointUpdate: tiers := event.Endpoint.Tiers tierInfos := make([]TierInfo, len(tiers)) - var allPolsIDs []proto.PolicyID + var allPolsIDs []types.PolicyID for i, tier := range event.Endpoint.Tiers { tierInfos[i].Name = tier.Name tierInfos[i].IngressPolicyNames = tier.IngressPolicies @@ -386,7 +387,7 @@ func (d *MockDataplane) OnEvent(event interface{}) { combinedPolNames = append(combinedPolNames, tier.IngressPolicies...) combinedPolNames = append(combinedPolNames, tier.EgressPolicies...) for _, polName := range combinedPolNames { - polID := proto.PolicyID{Tier: tier.Name, Name: polName} + polID := types.PolicyID{Tier: tier.Name, Name: polName} allPolsIDs = append(allPolsIDs, polID) Expect(d.activePolicies).To(HaveKey(polID), fmt.Sprintf("Expected policy %v referenced by workload endpoint "+ @@ -402,7 +403,7 @@ func (d *MockDataplane) OnEvent(event interface{}) { // Check that all the profiles referenced by the endpoint are already present, which // is one of the guarantees provided by the EventSequencer. 
for _, profName := range event.Endpoint.ProfileIds { - profID := proto.ProfileID{Name: profName} + profID := types.ProfileID{Name: profName} Expect(d.activeProfiles.Contains(profID)).To(BeTrue(), fmt.Sprintf("Expected profile %v referenced by workload endpoint "+ "update %v to be active", profID, event)) @@ -514,14 +515,14 @@ type TierInfo struct { EgressPolicyNames []string } -type workloadId proto.WorkloadEndpointID +type workloadId types.WorkloadEndpointID func (w *workloadId) String() string { return fmt.Sprintf("%v/%v/%v", w.OrchestratorId, w.WorkloadId, w.EndpointId) } -type hostEpId proto.HostEndpointID +type hostEpId types.HostEndpointID func (i *hostEpId) String() string { return i.EndpointId diff --git a/felix/dataplane/windows/endpoint_mgr.go b/felix/dataplane/windows/endpoint_mgr.go index ad2e7906c0c..0a46212d61e 100644 --- a/felix/dataplane/windows/endpoint_mgr.go +++ b/felix/dataplane/windows/endpoint_mgr.go @@ -27,6 +27,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/projectcalico/calico/felix/dataplane/windows/hns" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/felix/dataplane/windows/policysets" "github.com/projectcalico/calico/felix/proto" @@ -62,9 +63,9 @@ type endpointManager struct { // the policysets dataplane to be used when looking up endpoint policies/profiles. policysetsDataplane policysets.PolicySetsDataplane // pendingWlEpUpdates stores any pending updates to be performed per endpoint. - pendingWlEpUpdates map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + pendingWlEpUpdates map[types.WorkloadEndpointID]*proto.WorkloadEndpoint // activeWlEndpoints stores the active/current state that was applied per endpoint - activeWlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + activeWlEndpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint // addressToEndpointId serves as a hns endpoint id cache. It enables us to lookup the hns // endpoint id for a given endpoint ip address. 
addressToEndpointId map[string]string @@ -115,8 +116,8 @@ func newEndpointManager(hns hnsInterface, policysets policysets.PolicySetsDatapl hnsNetworkRegexp: networkNameRegexp, policysetsDataplane: policysets, addressToEndpointId: make(map[string]string), - activeWlEndpoints: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, - pendingWlEpUpdates: map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + activeWlEndpoints: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, + pendingWlEpUpdates: map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{}, pendingIPSetUpdate: set.New[string](), hostAddrs: hostIPv4s, } @@ -136,10 +137,12 @@ func (m *endpointManager) OnUpdate(msg interface{}) { switch msg := msg.(type) { case *proto.WorkloadEndpointUpdate: log.WithField("workloadEndpointId", msg.Id).Info("Processing WorkloadEndpointUpdate") - m.pendingWlEpUpdates[*msg.Id] = msg.Endpoint + id := types.ProtoToWorkloadEndpointID(msg.GetId()) + m.pendingWlEpUpdates[id] = msg.Endpoint case *proto.WorkloadEndpointRemove: log.WithField("workloadEndpointId", msg.Id).Info("Processing WorkloadEndpointRemove") - m.pendingWlEpUpdates[*msg.Id] = nil + id := types.ProtoToWorkloadEndpointID(msg.GetId()) + m.pendingWlEpUpdates[id] = nil case *proto.ActivePolicyUpdate: log.WithField("policyID", msg.Id).Info("Processing ActivePolicyUpdate") m.ProcessPolicyProfileUpdate(policysets.PolicyNamePrefix + msg.Id.Name) @@ -429,7 +432,7 @@ func (m *endpointManager) markAllEndpointForRefresh() { // applyRules gathers all of the rules for the specified policies and sends them to hns // as an endpoint policy update (this actually applies the rules to the dataplane). -func (m *endpointManager) applyRules(workloadId proto.WorkloadEndpointID, endpointId string, inboundPolicyIds []string, outboundPolicyIds []string) error { +func (m *endpointManager) applyRules(workloadId types.WorkloadEndpointID, endpointId string, inboundPolicyIds []string, outboundPolicyIds []string) error { logCxt := log.WithFields(log.Fields{"id": workloadId, "endpointId": endpointId}) logCxt.WithFields(log.Fields{ "inboundPolicyIds": inboundPolicyIds, diff --git a/felix/dataplane/windows/policysets/policysets_test.go b/felix/dataplane/windows/policysets/policysets_test.go index d587990a1e7..cccd1c987ca 100644 --- a/felix/dataplane/windows/policysets/policysets_test.go +++ b/felix/dataplane/windows/policysets/policysets_test.go @@ -23,6 +23,7 @@ import ( "github.com/projectcalico/calico/felix/dataplane/windows/hns" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) func TestRuleRenderingWithStaticRules(t *testing.T) { @@ -674,7 +675,7 @@ func TestNegativeTestCases(t *testing.T) { }), "unexpected rule with Negative match") //Test with invalid argument to AddOrReplacePolicySet (Other than Profile/Policy) - ps.AddOrReplacePolicySet("invalid-arg", &proto.ProfileID{ + ps.AddOrReplacePolicySet("invalid-arg", &types.ProfileID{ Name: "abc", }) diff --git a/felix/fv/policysync_test.go b/felix/fv/policysync_test.go index 92660276079..f2dc8d64d7b 100644 --- a/felix/fv/policysync_test.go +++ b/felix/fv/policysync_test.go @@ -40,6 +40,7 @@ import ( "github.com/projectcalico/calico/pod2daemon/binder" "github.com/projectcalico/calico/felix/dataplane/mock" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/set" "github.com/projectcalico/calico/felix/proto" @@ -261,7 +262,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { It("workload 0's client should 
receive correct updates", func() { Eventually(mockWlClient[0].InSync).Should(BeTrue()) - Eventually(mockWlClient[0].ActiveProfiles).Should(Equal(set.From(proto.ProfileID{Name: "default"}))) + Eventually(mockWlClient[0].ActiveProfiles).Should(Equal(set.From(types.ProfileID{Name: "default"}))) // Should only hear about our own workload. Eventually(mockWlClient[0].EndpointToPolicyOrder).Should(Equal( map[string][]mock.TierInfo{"k8s/fv/fv-pod-0/eth0": {}})) @@ -269,7 +270,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { It("workload 1's client should receive correct updates", func() { Eventually(mockWlClient[1].InSync).Should(BeTrue()) - Eventually(mockWlClient[1].ActiveProfiles).Should(Equal(set.From(proto.ProfileID{Name: "default"}))) + Eventually(mockWlClient[1].ActiveProfiles).Should(Equal(set.From(types.ProfileID{Name: "default"}))) // Should only hear about our own workload. Eventually(mockWlClient[1].EndpointToPolicyOrder).Should(Equal( map[string][]mock.TierInfo{"k8s/fv/fv-pod-1/eth0": {}})) @@ -305,7 +306,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { } if wlIdx != 2 { - policyID := proto.PolicyID{Name: "default.policy-0", Tier: "default"} + policyID := types.PolicyID{Name: "default.policy-0", Tier: "default"} Eventually(mockWlClient[wlIdx].ActivePolicies, waitTime).Should(Equal(set.From(policyID))) } @@ -313,7 +314,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { Expect(err).NotTo(HaveOccurred()) if wlIdx != 2 { - Eventually(mockWlClient[wlIdx].ActivePolicies, waitTime).Should(Equal(set.New[proto.PolicyID]())) + Eventually(mockWlClient[wlIdx].ActivePolicies, waitTime).Should(Equal(set.New[types.PolicyID]())) } } } @@ -334,7 +335,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { Context("after adding a policy that applies to workload 0 only", func() { var ( policy *api.GlobalNetworkPolicy - policyID proto.PolicyID + policyID types.PolicyID ) BeforeEach(func() { @@ -363,7 +364,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { policy, err = calicoClient.GlobalNetworkPolicies().Create(ctx, policy, utils.NoOptions) Expect(err).NotTo(HaveOccurred()) - policyID = proto.PolicyID{Name: "default.policy-0", Tier: "default"} + policyID = types.PolicyID{Name: "default.policy-0", Tier: "default"} }) It("should be sent to workload 0 only", func() { @@ -377,8 +378,8 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { IngressPolicyNames: []string{"default.policy-0"}, }}})) - Consistently(mockWlClient[1].ActivePolicies).Should(Equal(set.New[proto.PolicyID]())) - Consistently(mockWlClient[2].ActivePolicies).Should(Equal(set.New[proto.PolicyID]())) + Consistently(mockWlClient[1].ActivePolicies).Should(Equal(set.New[types.PolicyID]())) + Consistently(mockWlClient[2].ActivePolicies).Should(Equal(set.New[types.PolicyID]())) }) It("should be correctly mapped to proto policy", func() { @@ -432,7 +433,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { _, err := calicoClient.GlobalNetworkPolicies().Delete(ctx, "policy-0", options.DeleteOptions{}) Expect(err).NotTo(HaveOccurred()) - Eventually(mockWlClient[0].ActivePolicies).Should(Equal(set.New[proto.PolicyID]())) + Eventually(mockWlClient[0].ActivePolicies).Should(Equal(set.New[types.PolicyID]())) }) It("should handle a change of selector", func() { @@ -450,7 +451,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { 
Eventually(mockWlClient[0].EndpointToPolicyOrder).Should(Equal( map[string][]mock.TierInfo{"k8s/fv/fv-pod-0/eth0": {}})) - Eventually(mockWlClient[0].ActivePolicies).Should(Equal(set.New[proto.PolicyID]())) + Eventually(mockWlClient[0].ActivePolicies).Should(Equal(set.New[types.PolicyID]())) By("Updating workload 1 to make the policy active") Eventually(mockWlClient[1].ActivePolicies).Should(Equal(set.From(policyID))) @@ -461,13 +462,13 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { IngressPolicyNames: []string{"default.policy-0"}, }}})) - Consistently(mockWlClient[2].ActivePolicies).Should(Equal(set.New[proto.PolicyID]())) + Consistently(mockWlClient[2].ActivePolicies).Should(Equal(set.New[types.PolicyID]())) }) It("should handle a change of profiles", func() { // Make sure the initial update makes it through or we might get a // false positive. - defProfID := proto.ProfileID{Name: "default"} + defProfID := types.ProfileID{Name: "default"} Eventually(mockWlClient[0].ActiveProfiles).Should(Equal(set.From( defProfID, ))) @@ -481,7 +482,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { Expect(err).NotTo(HaveOccurred()) By("Sending through an endpoint update and policy remove/update") - notDefProfID := proto.ProfileID{Name: "notdefault"} + notDefProfID := types.ProfileID{Name: "notdefault"} Eventually(mockWlClient[0].EndpointToProfiles).Should(Equal(map[string][]string{ "k8s/fv/fv-pod-0/eth0": {"notdefault"}})) Eventually(mockWlClient[0].ActiveProfiles).Should(Equal(set.From(notDefProfID))) @@ -492,7 +493,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { }) Context("after adding a service account as profile", func() { - var saID proto.ServiceAccountID + var saID types.ServiceAccountID BeforeEach(func() { log.Info("Adding Service Account Profile") @@ -511,7 +512,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { It("should sync service account to each workload", func() { for _, c := range mockWlClient { - Eventually(c.ServiceAccounts).Should(Equal(map[proto.ServiceAccountID]*proto.ServiceAccountUpdate{ + Eventually(c.ServiceAccounts).Should(Equal(map[types.ServiceAccountID]*proto.ServiceAccountUpdate{ saID: { Id: &saID, Labels: map[string]string{"key.1": "value.1", "key_2": "value-2"}, @@ -522,7 +523,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { }) Context("after adding a namespace as profile", func() { - var nsID proto.NamespaceID + var nsID types.NamespaceID BeforeEach(func() { log.Info("Adding Namespace Profile") @@ -540,7 +541,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { It("should sync namespace to each workload", func() { for _, c := range mockWlClient { - Eventually(c.Namespaces).Should(Equal(map[proto.NamespaceID]*proto.NamespaceUpdate{ + Eventually(c.Namespaces).Should(Equal(map[types.NamespaceID]*proto.NamespaceUpdate{ nsID: { Id: &nsID, Labels: map[string]string{"key.1": "value.1", "key_2": "value-2"}, @@ -563,7 +564,7 @@ var _ = Context("_POL-SYNC_ _BPF-SAFE_ policy sync API tests", func() { expectFullSync := func(client *mockWorkloadClient) { // The new client should take over, getting a full sync. 
Eventually(client.InSync).Should(BeTrue()) - Eventually(client.ActiveProfiles).Should(Equal(set.From(proto.ProfileID{Name: "default"}))) + Eventually(client.ActiveProfiles).Should(Equal(set.From(types.ProfileID{Name: "default"}))) Eventually(client.EndpointToPolicyOrder).Should(Equal(map[string][]mock.TierInfo{"k8s/fv/fv-pod-0/eth0": {}})) } diff --git a/felix/policysync/processor.go b/felix/policysync/processor.go index 778864f4a19..4807ce15d9a 100644 --- a/felix/policysync/processor.go +++ b/felix/policysync/processor.go @@ -20,6 +20,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) // MaxMembersPerMessage sets the limit on how many IP Set members to include in an outgoing gRPC message, which has a @@ -33,11 +34,11 @@ const MaxMembersPerMessage = 82200 type Processor struct { Updates <-chan interface{} JoinUpdates chan interface{} - endpointsByID map[proto.WorkloadEndpointID]*EndpointInfo - policyByID map[proto.PolicyID]*policyInfo - profileByID map[proto.ProfileID]*profileInfo - serviceAccountByID map[proto.ServiceAccountID]*proto.ServiceAccountUpdate - namespaceByID map[proto.NamespaceID]*proto.NamespaceUpdate + endpointsByID map[types.WorkloadEndpointID]*EndpointInfo + policyByID map[types.PolicyID]*policyInfo + profileByID map[types.ProfileID]*profileInfo + serviceAccountByID map[types.ServiceAccountID]*proto.ServiceAccountUpdate + namespaceByID map[types.NamespaceID]*proto.NamespaceUpdate ipSetsByID map[string]*ipSetInfo receivedInSync bool } @@ -47,13 +48,13 @@ type EndpointInfo struct { output chan<- proto.ToDataplane currentJoinUID uint64 endpointUpd *proto.WorkloadEndpointUpdate - syncedPolicies map[proto.PolicyID]bool - syncedProfiles map[proto.ProfileID]bool + syncedPolicies map[types.PolicyID]bool + syncedProfiles map[types.ProfileID]bool syncedIPSets map[string]bool } type JoinMetadata struct { - EndpointID proto.WorkloadEndpointID + EndpointID types.WorkloadEndpointID // JoinUID is a correlator, used to match stop requests with join requests. JoinUID uint64 } @@ -78,11 +79,11 @@ func NewProcessor(updates <-chan interface{}) *Processor { Updates: updates, // JoinUpdates from the new servers that have started. 
JoinUpdates: make(chan interface{}, 10), - endpointsByID: make(map[proto.WorkloadEndpointID]*EndpointInfo), - policyByID: make(map[proto.PolicyID]*policyInfo), - profileByID: make(map[proto.ProfileID]*profileInfo), - serviceAccountByID: make(map[proto.ServiceAccountID]*proto.ServiceAccountUpdate), - namespaceByID: make(map[proto.NamespaceID]*proto.NamespaceUpdate), + endpointsByID: make(map[types.WorkloadEndpointID]*EndpointInfo), + policyByID: make(map[types.PolicyID]*policyInfo), + profileByID: make(map[types.ProfileID]*profileInfo), + serviceAccountByID: make(map[types.ServiceAccountID]*proto.ServiceAccountUpdate), + namespaceByID: make(map[types.NamespaceID]*proto.NamespaceUpdate), ipSetsByID: make(map[string]*ipSetInfo), } } @@ -130,8 +131,8 @@ func (p *Processor) handleJoin(joinReq JoinRequest) { ei.currentJoinUID = joinReq.JoinUID ei.output = joinReq.C - ei.syncedPolicies = map[proto.PolicyID]bool{} - ei.syncedProfiles = map[proto.ProfileID]bool{} + ei.syncedPolicies = map[types.PolicyID]bool{} + ei.syncedProfiles = map[types.ProfileID]bool{} ei.syncedIPSets = map[string]bool{} p.maybeSyncEndpoint(ei) @@ -228,15 +229,15 @@ func (p *Processor) handleInSync(update *proto.InSync) { } func (p *Processor) handleWorkloadEndpointUpdate(update *proto.WorkloadEndpointUpdate) { - epID := *update.Id + epID := types.ProtoToWorkloadEndpointID(update.GetId()) log.WithField("epID", epID).Info("Endpoint update") ei, ok := p.endpointsByID[epID] if !ok { // Add this endpoint ei = &EndpointInfo{ endpointUpd: update, - syncedPolicies: map[proto.PolicyID]bool{}, - syncedProfiles: map[proto.ProfileID]bool{}, + syncedPolicies: map[types.PolicyID]bool{}, + syncedProfiles: map[types.ProfileID]bool{}, } p.endpointsByID[epID] = ei } else { @@ -270,23 +271,24 @@ func (p *Processor) maybeSyncEndpoint(ei *EndpointInfo) { func (p *Processor) handleWorkloadEndpointRemove(update *proto.WorkloadEndpointRemove) { // we trust the Calc graph never to send us a remove for an endpoint it didn't tell us about - ei := p.endpointsByID[*update.Id] + epID := types.ProtoToWorkloadEndpointID(update.GetId()) + ei := p.endpointsByID[epID] if ei.output != nil { // Send update and close down. 
ei.output <- proto.ToDataplane{Payload: &proto.ToDataplane_WorkloadEndpointRemove{WorkloadEndpointRemove: update}} close(ei.output) } - delete(p.endpointsByID, *update.Id) + delete(p.endpointsByID, epID) } func (p *Processor) handleActiveProfileUpdate(update *proto.ActiveProfileUpdate) { - pId := *update.Id + pId := types.ProtoToProfileID(update.GetId()) profile := update.GetProfile() p.profileByID[pId] = newProfileInfo(profile) // Update any endpoints that reference this profile for _, ei := range p.updateableEndpoints() { - action := func(other proto.ProfileID) bool { + action := func(other types.ProfileID) bool { if other == pId { doAdd, doDel := p.getIPSetsSync(ei) doAdd() @@ -302,7 +304,7 @@ func (p *Processor) handleActiveProfileUpdate(update *proto.ActiveProfileUpdate) } func (p *Processor) handleActiveProfileRemove(update *proto.ActiveProfileRemove) { - pId := *update.Id + pId := types.ProtoToProfileID(update.GetId()) log.WithFields(log.Fields{"ProfileID": pId}).Debug("Processing ActiveProfileRemove") // We trust the Calc graph to remove all references to the Profile before sending the Remove, thus we will have @@ -311,7 +313,7 @@ func (p *Processor) handleActiveProfileRemove(update *proto.ActiveProfileRemove) } func (p *Processor) handleActivePolicyUpdate(update *proto.ActivePolicyUpdate) { - pId := *update.Id + pId := types.ProtoToPolicyID(update.GetId()) log.WithFields(log.Fields{"PolicyID": pId}).Debug("Processing ActivePolicyUpdate") policy := update.GetPolicy() p.policyByID[pId] = newPolicyInfo(policy) @@ -319,7 +321,7 @@ func (p *Processor) handleActivePolicyUpdate(update *proto.ActivePolicyUpdate) { // Update any endpoints that reference this policy for _, ei := range p.updateableEndpoints() { // Closure of the action to take on each policy on the endpoint. 
- action := func(other proto.PolicyID) bool { + action := func(other types.PolicyID) bool { if other == pId { doAdd, doDel := p.getIPSetsSync(ei) doAdd() @@ -335,7 +337,7 @@ func (p *Processor) handleActivePolicyUpdate(update *proto.ActivePolicyUpdate) { } func (p *Processor) handleActivePolicyRemove(update *proto.ActivePolicyRemove) { - pId := *update.Id + pId := types.ProtoToPolicyID(update.GetId()) log.WithFields(log.Fields{"PolicyID": pId}).Debug("Processing ActivePolicyRemove") // We trust the Calc graph to remove all references to the Policy before sending the Remove, thus we will have @@ -344,7 +346,7 @@ func (p *Processor) handleActivePolicyRemove(update *proto.ActivePolicyRemove) { } func (p *Processor) handleServiceAccountUpdate(update *proto.ServiceAccountUpdate) { - id := *update.Id + id := types.ProtoToServiceAccountID(update.GetId()) log.WithField("ServiceAccountID", id).Debug("Processing ServiceAccountUpdate") for _, ei := range p.updateableEndpoints() { @@ -354,7 +356,7 @@ func (p *Processor) handleServiceAccountUpdate(update *proto.ServiceAccountUpdat } func (p *Processor) handleServiceAccountRemove(update *proto.ServiceAccountRemove) { - id := *update.Id + id := types.ProtoToServiceAccountID(update.GetId()) log.WithField("ServiceAccountID", id).Debug("Processing ServiceAccountRemove") for _, ei := range p.updateableEndpoints() { @@ -364,7 +366,7 @@ func (p *Processor) handleServiceAccountRemove(update *proto.ServiceAccountRemov } func (p *Processor) handleNamespaceUpdate(update *proto.NamespaceUpdate) { - id := *update.Id + id := types.ProtoToNamespaceID(update.GetId()) log.WithField("NamespaceID", id).Debug("Processing NamespaceUpdate") for _, ei := range p.updateableEndpoints() { @@ -374,7 +376,7 @@ func (p *Processor) handleNamespaceUpdate(update *proto.NamespaceUpdate) { } func (p *Processor) handleNamespaceRemove(update *proto.NamespaceRemove) { - id := *update.Id + id := types.ProtoToNamespaceID(update.GetId()) log.WithField("NamespaceID", id).Debug("Processing NamespaceRemove") for _, ei := range p.updateableEndpoints() { @@ -442,12 +444,13 @@ func (p *Processor) handleIPSetRemove(update *proto.IPSetRemove) { } func (p *Processor) syncAddedPolicies(ei *EndpointInfo) { - ei.iteratePolicies(func(pId proto.PolicyID) bool { + ei.iteratePolicies(func(pId types.PolicyID) bool { if !ei.syncedPolicies[pId] { policy := p.policyByID[pId].p + ppId := types.PolicyIDToProto(pId) ei.output <- proto.ToDataplane{Payload: &proto.ToDataplane_ActivePolicyUpdate{ ActivePolicyUpdate: &proto.ActivePolicyUpdate{ - Id: &pId, + Id: ppId, Policy: policy, }, }} @@ -461,9 +464,9 @@ func (p *Processor) syncAddedPolicies(ei *EndpointInfo) { // policies. func (p *Processor) syncRemovedPolicies(ei *EndpointInfo) { oldSyncedPolicies := ei.syncedPolicies - ei.syncedPolicies = map[proto.PolicyID]bool{} + ei.syncedPolicies = map[types.PolicyID]bool{} - ei.iteratePolicies(func(pId proto.PolicyID) bool { + ei.iteratePolicies(func(pId types.PolicyID) bool { if !oldSyncedPolicies[pId] { log.WithFields(log.Fields{ "PolicyID": pId, @@ -479,19 +482,21 @@ func (p *Processor) syncRemovedPolicies(ei *EndpointInfo) { // oldSyncedPolicies now contains only policies that are no longer needed by this endpoint. 
for polID := range oldSyncedPolicies { + ppolID := types.PolicyIDToProto(polID) ei.output <- proto.ToDataplane{Payload: &proto.ToDataplane_ActivePolicyRemove{ - ActivePolicyRemove: &proto.ActivePolicyRemove{Id: &polID}, + ActivePolicyRemove: &proto.ActivePolicyRemove{Id: ppolID}, }} } } func (p *Processor) syncAddedProfiles(ei *EndpointInfo) { - ei.iterateProfiles(func(pId proto.ProfileID) bool { + ei.iterateProfiles(func(pId types.ProfileID) bool { if !ei.syncedProfiles[pId] { profile := p.profileByID[pId].p + ppId := types.ProfileIDToProto(pId) ei.output <- proto.ToDataplane{Payload: &proto.ToDataplane_ActiveProfileUpdate{ ActiveProfileUpdate: &proto.ActiveProfileUpdate{ - Id: &pId, + Id: ppId, Profile: profile, }, }} @@ -505,9 +510,9 @@ func (p *Processor) syncAddedProfiles(ei *EndpointInfo) { // profiles. func (p *Processor) syncRemovedProfiles(ei *EndpointInfo) { oldSyncedProfiles := ei.syncedProfiles - ei.syncedProfiles = map[proto.ProfileID]bool{} + ei.syncedProfiles = map[types.ProfileID]bool{} - ei.iterateProfiles(func(pId proto.ProfileID) bool { + ei.iterateProfiles(func(pId types.ProfileID) bool { if !oldSyncedProfiles[pId] { log.WithField("profileID", pId).Panic("syncing removed profiles before all profiles are added") } @@ -520,8 +525,9 @@ func (p *Processor) syncRemovedProfiles(ei *EndpointInfo) { // oldSyncedProfiles now contains only policies that are no longer needed by this endpoint. for polID := range oldSyncedProfiles { + ppolID := types.ProfileIDToProto(polID) ei.output <- proto.ToDataplane{Payload: &proto.ToDataplane_ActiveProfileRemove{ - ActiveProfileRemove: &proto.ActiveProfileRemove{Id: &polID}, + ActiveProfileRemove: &proto.ActiveProfileRemove{Id: ppolID}, }} } } @@ -562,7 +568,7 @@ func (p *Processor) updateableEndpoints() []*EndpointInfo { // referencesIPSet determines whether the endpoint's policies or profiles reference a given IPSet func (p *Processor) referencesIPSet(ei *EndpointInfo, id string) bool { var found = false - ei.iterateProfiles(func(pid proto.ProfileID) bool { + ei.iterateProfiles(func(pid types.ProfileID) bool { pi := p.profileByID[pid] if pi.referencesIPSet(id) { found = true @@ -575,7 +581,7 @@ func (p *Processor) referencesIPSet(ei *EndpointInfo, id string) bool { return true } // otherwise, check policies - ei.iteratePolicies(func(pid proto.PolicyID) bool { + ei.iteratePolicies(func(pid types.PolicyID) bool { pi := p.policyByID[pid] if pi.referencesIPSet(id) { found = true @@ -591,14 +597,14 @@ func (p *Processor) referencesIPSet(ei *EndpointInfo, id string) bool { func (p *Processor) getIPSetsSync(ei *EndpointInfo) (func(), func()) { // Compute all the IPSets that should be synced. newS := map[string]bool{} - ei.iterateProfiles(func(id proto.ProfileID) bool { + ei.iterateProfiles(func(id types.ProfileID) bool { pi := p.profileByID[id] for ipset := range pi.refs { newS[ipset] = true } return false }) - ei.iteratePolicies(func(id proto.PolicyID) bool { + ei.iteratePolicies(func(id types.PolicyID) bool { pi := p.policyByID[id] for ipset := range pi.refs { newS[ipset] = true @@ -644,9 +650,9 @@ func (p *Processor) sendIPSetRemove(ei *EndpointInfo, id string) { } // Perform the action on every policy on the Endpoint, breaking if the action returns true. 
-func (ei *EndpointInfo) iteratePolicies(action func(id proto.PolicyID) (stop bool)) { - var pId proto.PolicyID - seen := make(map[proto.PolicyID]bool) +func (ei *EndpointInfo) iteratePolicies(action func(id types.PolicyID) (stop bool)) { + var pId types.PolicyID + seen := make(map[types.PolicyID]bool) for _, tier := range ei.endpointUpd.GetEndpoint().GetTiers() { pId.Tier = tier.Name for _, name := range tier.GetIngressPolicies() { @@ -670,8 +676,8 @@ func (ei *EndpointInfo) iteratePolicies(action func(id proto.PolicyID) (stop boo } // Perform the action on every profile on the Endpoint, breaking if the action returns true. -func (ei *EndpointInfo) iterateProfiles(action func(id proto.ProfileID) (stop bool)) { - var pId proto.ProfileID +func (ei *EndpointInfo) iterateProfiles(action func(id types.ProfileID) (stop bool)) { + var pId types.ProfileID for _, name := range ei.endpointUpd.GetEndpoint().GetProfileIds() { pId.Name = name if action(pId) { diff --git a/felix/policysync/processor_test.go b/felix/policysync/processor_test.go index 7c34e8885e8..118f4f41086 100644 --- a/felix/policysync/processor_test.go +++ b/felix/policysync/processor_test.go @@ -26,13 +26,15 @@ import ( . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" - "github.com/onsi/gomega/types" + gomegatypes "github.com/onsi/gomega/types" "google.golang.org/grpc" "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/insecure" + googleproto "google.golang.org/protobuf/proto" "github.com/projectcalico/calico/felix/policysync" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/pod2daemon/binder" ) @@ -123,39 +125,45 @@ var _ = Describe("Processor", func() { Context("on new join", func() { var output chan proto.ToDataplane - var accounts [3]proto.ServiceAccountID + var accounts [3]types.ServiceAccountID BeforeEach(func() { output, _ = join("test", 1) for i := 0; i < 3; i++ { msg := <-output - accounts[i] = *msg.GetServiceAccountUpdate().Id + accounts[i] = types.ProtoToServiceAccountID( + msg.GetServiceAccountUpdate().GetId(), + ) } }) It("should get 3 updates", func() { - Expect(accounts).To(ContainElement(proto.ServiceAccountID{ + Expect(accounts).To(ContainElement(types.ServiceAccountID{ Name: "test_serviceaccount0", Namespace: "test_namespace0"})) - Expect(accounts).To(ContainElement(proto.ServiceAccountID{ + Expect(accounts).To(ContainElement(types.ServiceAccountID{ Name: "test_serviceaccount0", Namespace: "test_namespace1"})) - Expect(accounts).To(ContainElement(proto.ServiceAccountID{ + Expect(accounts).To(ContainElement(types.ServiceAccountID{ Name: "test_serviceaccount1", Namespace: "test_namespace0"})) }) It("should pass updates", func() { updateServiceAccount("t0", "t5") msg := <-output - Expect(msg.GetServiceAccountUpdate().GetId()).To(Equal( - &proto.ServiceAccountID{Name: "t0", Namespace: "t5"}, - )) + equal := googleproto.Equal( + msg.GetServiceAccountUpdate().GetId(), &proto.ServiceAccountID{Name: "t0", Namespace: "t5"}, + ) + Expect(equal).To(BeTrue()) + }) It("should pass removes", func() { removeServiceAccount("test_serviceaccount0", "test_namespace0") msg := <-output - Expect(msg.GetServiceAccountRemove().GetId()).To(Equal(&proto.ServiceAccountID{ - Name: "test_serviceaccount0", Namespace: "test_namespace0"}, - )) + equal := googleproto.Equal( + msg.GetServiceAccountRemove().GetId(), + &proto.ServiceAccountID{Name: "test_serviceaccount0", Namespace: "test_namespace0"}, + ) + Expect(equal).To(BeTrue()) }) }) }) @@ 
-166,12 +174,12 @@ var _ = Describe("Processor", func() { BeforeEach(func() { for i := 0; i < 2; i++ { w := fmt.Sprintf("test%d", i) - d := testId(w) + d := types.WorkloadEndpointIDToProto(testId(w)) output[i], _ = join(w, uint64(i)) // Ensure the joins are completed by sending a workload endpoint for each. updates <- &proto.WorkloadEndpointUpdate{ - Id: &d, + Id: d, Endpoint: &proto.WorkloadEndpoint{}, } <-output[i] @@ -238,32 +246,36 @@ var _ = Describe("Processor", func() { Context("on new join", func() { var output chan proto.ToDataplane - var accounts [3]proto.NamespaceID + var accounts [3]types.NamespaceID BeforeEach(func() { output, _ = join("test", 1) for i := 0; i < 3; i++ { msg := <-output - accounts[i] = *msg.GetNamespaceUpdate().Id + accounts[i] = types.ProtoToNamespaceID( + msg.GetNamespaceUpdate().GetId(), + ) } }) It("should get 3 updates", func() { - Expect(accounts).To(ContainElement(proto.NamespaceID{Name: "test_namespace0"})) - Expect(accounts).To(ContainElement(proto.NamespaceID{Name: "test_namespace1"})) - Expect(accounts).To(ContainElement(proto.NamespaceID{Name: "test_namespace2"})) + Expect(accounts).To(ContainElement(types.NamespaceID{Name: "test_namespace0"})) + Expect(accounts).To(ContainElement(types.NamespaceID{Name: "test_namespace1"})) + Expect(accounts).To(ContainElement(types.NamespaceID{Name: "test_namespace2"})) }) It("should pass updates", func() { updateNamespace("t0") msg := <-output - Expect(msg.GetNamespaceUpdate().GetId()).To(Equal(&proto.NamespaceID{Name: "t0"})) + equal := googleproto.Equal(msg.GetNamespaceUpdate().GetId(), &proto.NamespaceID{Name: "t0"}) + Expect(equal).To(BeTrue()) }) It("should pass removes", func() { removeNamespace("test_namespace0") msg := <-output - Expect(msg.GetNamespaceRemove().GetId()).To(Equal(&proto.NamespaceID{Name: "test_namespace0"})) + equal := googleproto.Equal(msg.GetNamespaceRemove().GetId(), &proto.NamespaceID{Name: "test_namespace0"}) + Expect(equal).To(BeTrue()) }) }) }) @@ -274,12 +286,12 @@ var _ = Describe("Processor", func() { BeforeEach(func() { for i := 0; i < 2; i++ { w := fmt.Sprintf("test%d", i) - d := testId(w) + d := types.WorkloadEndpointIDToProto(testId(w)) output[i], _ = join(w, uint64(i)) // Ensure the joins are completed by sending a workload endpoint for each. updates <- &proto.WorkloadEndpointUpdate{ - Id: &d, + Id: d, Endpoint: &proto.WorkloadEndpoint{}, } <-output[i] @@ -322,8 +334,8 @@ var _ = Describe("Processor", func() { Context("with two joined endpoints, one with active profile", func() { var refdOutput chan proto.ToDataplane var unrefdOutput chan proto.ToDataplane - var refdId proto.WorkloadEndpointID - var unrefdId proto.WorkloadEndpointID + var refdId types.WorkloadEndpointID + var unrefdId types.WorkloadEndpointID var assertInactiveNoUpdate func() var proUpd *proto.ActiveProfileUpdate var ipSetUpd *proto.IPSetUpdate @@ -336,19 +348,21 @@ var _ = Describe("Processor", func() { // Ensure the joins are completed by sending a workload endpoint for each. 
refUpd := &proto.WorkloadEndpointUpdate{ - Id: &refdId, + Id: types.WorkloadEndpointIDToProto(refdId), Endpoint: &proto.WorkloadEndpoint{}, } updates <- refUpd g := <-refdOutput - Expect(&g).To(HavePayload(refUpd)) + equal := googleproto.Equal(g.GetWorkloadEndpointUpdate(), refUpd) + Expect(equal).To(BeTrue()) unrefUpd := &proto.WorkloadEndpointUpdate{ - Id: &unrefdId, + Id: types.WorkloadEndpointIDToProto(unrefdId), Endpoint: &proto.WorkloadEndpoint{}, } updates <- unrefUpd g = <-unrefdOutput - Expect(&g).To(HavePayload(unrefUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), unrefUpd) + Expect(equal).To(BeTrue()) // Send the IPSet, a Profile referring to it, and a WEP update referring to the // Profile. This "activates" the WEP relative to the IPSet @@ -365,29 +379,33 @@ var _ = Describe("Processor", func() { } updates <- proUpd wepUpd := &proto.WorkloadEndpointUpdate{ - Id: &refdId, + Id: types.WorkloadEndpointIDToProto(refdId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- wepUpd // All three updates get pushed to the active endpoint (1) g = <-refdOutput - Expect(&g).To(HavePayload(ipSetUpd)) + equal = googleproto.Equal(g.GetIpsetUpdate(), ipSetUpd) + Expect(equal).To(BeTrue()) g = <-refdOutput - Expect(&g).To(HavePayload(proUpd)) + equal = googleproto.Equal(g.GetActiveProfileUpdate(), proUpd) + Expect(equal).To(BeTrue()) g = <-refdOutput - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) assertInactiveNoUpdate = func() { // Send a WEP update for the inactive and check we get it from the output // channel. This ensures that the inactive endpoint didn't get the IPSetUpdate // without having to wait for a timeout. u := &proto.WorkloadEndpointUpdate{ - Id: &unrefdId, + Id: types.WorkloadEndpointIDToProto(unrefdId), Endpoint: &proto.WorkloadEndpoint{}, } updates <- u g := <-unrefdOutput - Expect(&g).To(HavePayload(u)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), u) + Expect(equal).To(BeTrue()) } close(done) @@ -397,7 +415,8 @@ var _ = Describe("Processor", func() { msg := updateIpSet(IPSetName, 2) updates <- msg g := <-refdOutput - Expect(g).To(Equal(proto.ToDataplane{Payload: &proto.ToDataplane_IpsetUpdate{IpsetUpdate: msg}})) + equal := googleproto.Equal(&g, &proto.ToDataplane{Payload: &proto.ToDataplane_IpsetUpdate{IpsetUpdate: msg}}) + Expect(equal).To(BeTrue()) assertInactiveNoUpdate() close(done) @@ -411,8 +430,9 @@ var _ = Describe("Processor", func() { msg2 := deltaUpdateIpSet(IPSetName, 2, 2) updates <- msg2 g := <-refdOutput - Expect(g).To(Equal(proto.ToDataplane{ - Payload: &proto.ToDataplane_IpsetDeltaUpdate{IpsetDeltaUpdate: msg2}})) + equal := googleproto.Equal(&g, &proto.ToDataplane{ + Payload: &proto.ToDataplane_IpsetDeltaUpdate{IpsetDeltaUpdate: msg2}}) + Expect(equal).To(BeTrue()) msg2 = deltaUpdateIpSet(IPSetName, 2, 0) updates <- msg2 @@ -437,34 +457,42 @@ var _ = Describe("Processor", func() { It("should send IPSetUpdate when endpoint newly refs wep update", func(done Done) { wepUpd := &proto.WorkloadEndpointUpdate{ - Id: &unrefdId, + Id: types.WorkloadEndpointIDToProto(unrefdId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- wepUpd g := <-unrefdOutput - Expect(&g).To(HavePayload(ipSetUpd)) + equal := googleproto.Equal(g.GetIpsetUpdate(), ipSetUpd) + Expect(equal).To(BeTrue()) g = <-unrefdOutput - Expect(&g).To(HavePayload(proUpd)) + equal =
googleproto.Equal(g.GetActiveProfileUpdate(), proUpd) + Expect(equal).To(BeTrue()) g = <-unrefdOutput - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }) It("should send IPSetRemove when endpoint stops ref wep update", func(done Done) { wepUpd := &proto.WorkloadEndpointUpdate{ - Id: &refdId, + Id: types.WorkloadEndpointIDToProto(refdId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{}}, } updates <- wepUpd g := <-refdOutput - Expect(&g).To(HavePayload(wepUpd)) + equal := googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) g = <-refdOutput - Expect(&g).To(HavePayload(&proto.ActiveProfileRemove{Id: &proto.ProfileID{Name: ProfileName}})) + equal = googleproto.Equal( + g.GetActiveProfileRemove(), &proto.ActiveProfileRemove{Id: &proto.ProfileID{Name: ProfileName}}, + ) + Expect(equal).To(BeTrue()) g = <-refdOutput - Expect(&g).To(HavePayload(&proto.IPSetRemove{Id: IPSetName})) + equal = googleproto.Equal(g.GetIpsetRemove(), &proto.IPSetRemove{Id: IPSetName}) + Expect(equal).To(BeTrue()) // Remove the IPSet since nothing references it. updates <- removeIpSet(IPSetName) @@ -472,7 +500,8 @@ var _ = Describe("Processor", func() { // Send & receive a repeat WEPUpdate to ensure we didn't get a second remove. updates <- wepUpd g = <-refdOutput - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) assertInactiveNoUpdate() close(done) @@ -497,7 +526,8 @@ var _ = Describe("Processor", func() { Expect(g.GetIpsetUpdate().GetMembers()).To(HaveLen(6)) g = <-refdOutput - Expect(&g).To(HavePayload(pu)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), pu) + Expect(equal).To(BeTrue()) assertInactiveNoUpdate() @@ -569,7 +599,7 @@ var _ = Describe("Processor", func() { } updates <- pu wepu := &proto.WorkloadEndpointUpdate{ - Id: &unrefdId, + Id: types.WorkloadEndpointIDToProto(unrefdId), Endpoint: &proto.WorkloadEndpoint{ Tiers: []*proto.TierInfo{ { @@ -634,7 +664,7 @@ var _ = Describe("Processor", func() { } updates <- pu wepu := &proto.WorkloadEndpointUpdate{ - Id: &unrefdId, + Id: types.WorkloadEndpointIDToProto(unrefdId), Endpoint: &proto.WorkloadEndpoint{ Tiers: []*proto.TierInfo{ { @@ -718,7 +748,7 @@ var _ = Describe("Processor", func() { }) Context("with joined, active endpoint", func() { - var wepId proto.WorkloadEndpointID + var wepId types.WorkloadEndpointID var syncClient proto.PolicySyncClient var clientConn *grpc.ClientConn var syncContext context.Context @@ -752,7 +782,7 @@ var _ = Describe("Processor", func() { } updates <- pu wepUpd := &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- wepUpd @@ -764,10 +794,12 @@ var _ = Describe("Processor", func() { Expect(g.GetIpsetUpdate().GetMembers()).To(HaveLen(0)) g, err = syncStream.Recv() Expect(err).ToNot(HaveOccurred()) - Expect(g).To(HavePayload(pu)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), pu) + Expect(equal).To(BeTrue()) g, err = syncStream.Recv() Expect(err).ToNot(HaveOccurred()) - Expect(g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }, 2) @@ -867,13 +899,13 @@ var _ = Describe("Processor", func() { Context("with two joined endpoints", func() { var output [2]chan proto.ToDataplane - var wepID
[2]proto.WorkloadEndpointID + var wepID [2]types.WorkloadEndpointID var assertNoUpdate func(i int) BeforeEach(func() { assertNoUpdate = func(i int) { wepu := &proto.WorkloadEndpointUpdate{ - Id: &wepID[i], + Id: types.WorkloadEndpointIDToProto(wepID[i]), Endpoint: &proto.WorkloadEndpoint{}, } updates <- wepu @@ -905,19 +937,20 @@ var _ = Describe("Processor", func() { It("should add & remove profile when ref'd or not by WEP", func(done Done) { msg := &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- msg g := <-output[0] - Expect(&g).To(HavePayload(proUpdate)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg)) // Remove reference msg = &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{}}, } updates <- msg @@ -925,7 +958,8 @@ var _ = Describe("Processor", func() { g = <-output[0] Expect(&g).To(HavePayload(msg)) g = <-output[0] - Expect(&g).To(HavePayload(&proto.ActiveProfileRemove{Id: &profileID})) + equal = googleproto.Equal(g.GetActiveProfileRemove(), &proto.ActiveProfileRemove{Id: &profileID}) + Expect(equal).To(BeTrue()) assertNoUpdate(1) @@ -948,31 +982,34 @@ var _ = Describe("Processor", func() { updates <- msg msg2 := &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- msg2 g := <-output[0] - Expect(&g).To(HavePayload(proUpdate)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg2)) // Switch profiles msg2 = &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{newName}}, } updates <- msg2 g = <-output[0] - Expect(&g).To(HavePayload(msg)) + equal = googleproto.Equal(g.GetActiveProfileUpdate(), msg) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg2)) g = <-output[0] - Expect(&g).To(HavePayload(&proto.ActiveProfileRemove{Id: &profileID})) + equal = googleproto.Equal(g.GetActiveProfileRemove(), &proto.ActiveProfileRemove{Id: &profileID}) + Expect(equal).To(BeTrue()) assertNoUpdate(1) @@ -1001,7 +1038,7 @@ var _ = Describe("Processor", func() { It("should add & remove policy when ref'd or not by WEP", func(done Done) { msg := &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{Tiers: []*proto.TierInfo{ { Name: TierName, @@ -1011,14 +1048,15 @@ var _ = Describe("Processor", func() { } updates <- msg g := <-output[0] - Expect(&g).To(HavePayload(polUpd)) + equal := googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg)) // Remove reference msg = &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{Tiers: []*proto.TierInfo{ { Name: TierName, @@ -1030,7 +1068,8 @@ var _ = Describe("Processor", func() { g = <-output[0] Expect(&g).To(HavePayload(msg)) g = <-output[0] - Expect(&g).To(HavePayload(&proto.ActivePolicyRemove{Id: &policyID})) + equal = googleproto.Equal(g.GetActivePolicyRemove(), 
&proto.ActivePolicyRemove{Id: &policyID}) + Expect(equal).To(BeTrue()) assertNoUpdate(1) @@ -1053,7 +1092,7 @@ var _ = Describe("Processor", func() { updates <- msg msg2 := &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{Tiers: []*proto.TierInfo{ { Name: TierName, @@ -1063,14 +1102,15 @@ var _ = Describe("Processor", func() { } updates <- msg2 g := <-output[0] - Expect(&g).To(HavePayload(polUpd)) + equal := googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg2)) // Switch profiles msg2 = &proto.WorkloadEndpointUpdate{ - Id: &wepID[0], + Id: types.WorkloadEndpointIDToProto(wepID[0]), Endpoint: &proto.WorkloadEndpoint{Tiers: []*proto.TierInfo{ { Name: TierName, @@ -1081,13 +1121,15 @@ var _ = Describe("Processor", func() { updates <- msg2 g = <-output[0] - Expect(&g).To(HavePayload(msg)) + equal = googleproto.Equal(g.GetActivePolicyUpdate(), msg) + Expect(equal).To(BeTrue()) g = <-output[0] Expect(&g).To(HavePayload(msg2)) g = <-output[0] - Expect(&g).To(HavePayload(&proto.ActivePolicyRemove{Id: &policyID})) + equal = googleproto.Equal(g.GetActivePolicyRemove(), &proto.ActivePolicyRemove{Id: &policyID}) + Expect(equal).To(BeTrue()) // Calc graph removes the old policy. updates <- &proto.ActivePolicyRemove{Id: &policyID} @@ -1114,7 +1156,7 @@ var _ = Describe("Processor", func() { Id: &profileID, } wepUpd = &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{ProfileName}}, } updates <- proUpdate @@ -1125,10 +1167,12 @@ var _ = Describe("Processor", func() { output, _ := join("test", 1) g := <-output - Expect(&g).To(HavePayload(proUpdate)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }) @@ -1136,18 +1180,22 @@ var _ = Describe("Processor", func() { It("should resync profile & wep", func(done Done) { output, jm := join("test", 1) g := <-output - Expect(&g).To(HavePayload(proUpdate)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) // Leave leave(jm) output, _ = join("test", 2) g = <-output - Expect(&g).To(HavePayload(proUpdate)) + equal = googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }) @@ -1155,23 +1203,26 @@ var _ = Describe("Processor", func() { It("should not resync removed profile", func(done Done) { output, jm := join("test", 1) g := <-output - Expect(&g).To(HavePayload(proUpdate)) + equal := googleproto.Equal(g.GetActiveProfileUpdate(), proUpdate) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) // Leave leave(jm) // Remove reference to profile from WEP wepUpd2 := &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), Endpoint: &proto.WorkloadEndpoint{ProfileIds: []string{}}, } updates <- 
wepUpd2 output, _ = join("test", 2) g = <-output - Expect(&g).To(HavePayload(wepUpd2)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd2) + Expect(equal).To(BeTrue()) close(done) }) @@ -1190,7 +1241,7 @@ var _ = Describe("Processor", func() { } updates <- polUpd wepUpd = &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), Endpoint: &proto.WorkloadEndpoint{Tiers: []*proto.TierInfo{ { Name: TierName, @@ -1205,10 +1256,12 @@ var _ = Describe("Processor", func() { output, _ := join("test", 1) g := <-output - Expect(&g).To(HavePayload(polUpd)) + equal := googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }) @@ -1216,18 +1269,22 @@ var _ = Describe("Processor", func() { It("should resync policy & wep", func(done Done) { output, jm := join("test", 1) g := <-output - Expect(&g).To(HavePayload(polUpd)) + equal := googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) // Leave leave(jm) output, _ = join("test", 2) g = <-output - Expect(&g).To(HavePayload(polUpd)) + equal = googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) close(done) }) @@ -1235,16 +1292,18 @@ var _ = Describe("Processor", func() { It("should not resync removed policy", func(done Done) { output, jm := join("test", 1) g := <-output - Expect(&g).To(HavePayload(polUpd)) + equal := googleproto.Equal(g.GetActivePolicyUpdate(), polUpd) + Expect(equal).To(BeTrue()) g = <-output - Expect(&g).To(HavePayload(wepUpd)) + equal = googleproto.Equal(g.GetWorkloadEndpointUpdate(), wepUpd) + Expect(equal).To(BeTrue()) // Leave leave(jm) // Remove reference to policy from WEP wepUpd2 := &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), } updates <- wepUpd2 @@ -1265,7 +1324,7 @@ var _ = Describe("Processor", func() { BeforeEach(func() { wepUpd = &proto.WorkloadEndpointUpdate{ - Id: &wepId, + Id: types.WorkloadEndpointIDToProto(wepId), } updates <- wepUpd }) @@ -1309,7 +1368,7 @@ var _ = Describe("Processor", func() { g := <-c Expect(&g).To(HavePayload(wepUpd)) - rm := &proto.WorkloadEndpointRemove{Id: &wepId} + rm := &proto.WorkloadEndpointRemove{Id: types.WorkloadEndpointIDToProto(wepId)} updates <- rm g = <-c Expect(&g).To(HavePayload(rm)) @@ -1339,7 +1398,8 @@ var _ = Describe("Processor", func() { updates <- is for i := 0; i < 2; i++ { g := <-c[i] - Expect(&g).To(HavePayload(is)) + equal := googleproto.Equal(g.GetInSync(), is) + Expect(equal).To(BeTrue()) } close(done) }) @@ -1347,8 +1407,8 @@ var _ = Describe("Processor", func() { }) }) -func testId(w string) proto.WorkloadEndpointID { - return proto.WorkloadEndpointID{ +func testId(w string) types.WorkloadEndpointID { + return types.WorkloadEndpointID{ OrchestratorId: policysync.OrchestratorId, WorkloadId: w, EndpointId: policysync.EndpointId, @@ -1452,12 +1512,12 @@ func (t testCreds) Clone() credentials.TransportCredentials { func (t testCreds) OverrideServerName(string) error { return nil } -func HavePayload(expected interface{}) types.GomegaMatcher { +func 
HavePayload(expected interface{}) gomegatypes.GomegaMatcher { return &payloadMatcher{equal: Equal(expected)} } type payloadMatcher struct { - equal types.GomegaMatcher + equal gomegatypes.GomegaMatcher payload interface{} } diff --git a/felix/policysync/server.go b/felix/policysync/server.go index f1af2c037ed..0b9228ade1b 100644 --- a/felix/policysync/server.go +++ b/felix/policysync/server.go @@ -21,6 +21,7 @@ import ( log "github.com/sirupsen/logrus" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/pod2daemon/binder" "google.golang.org/grpc" @@ -37,6 +38,7 @@ const OutputQueueLen = 100 // There is a single instance of the Server, it disambiguates connections from different clients by the // credentials present in the gRPC request. type Server struct { + proto.UnimplementedPolicySyncServer JoinUpdates chan<- interface{} nextJoinUID func() uint64 } @@ -76,7 +78,7 @@ func (s *Server) Sync(_ *proto.SyncRequest, stream proto.PolicySync_SyncServer) // Send a join request to the processor to ask it to start sending us updates. updates := make(chan proto.ToDataplane, OutputQueueLen) - epID := proto.WorkloadEndpointID{ + epID := types.WorkloadEndpointID{ OrchestratorId: OrchestratorId, EndpointId: EndpointId, WorkloadId: workloadID, diff --git a/felix/policysync/server_test.go b/felix/policysync/server_test.go index 835d3fda4cb..43972038086 100644 --- a/felix/policysync/server_test.go +++ b/felix/policysync/server_test.go @@ -56,7 +56,7 @@ var _ = Describe("Server", func() { }() j := <-joins jr := j.(policysync.JoinRequest) - Expect(jr.EndpointID.GetWorkloadId()).To(Equal(WorkloadID)) + Expect(jr.EndpointID.WorkloadId).To(Equal(WorkloadID)) updates = jr.C close(done) }) @@ -97,7 +97,7 @@ var _ = Describe("Server", func() { } j := <-joins lr := j.(policysync.LeaveRequest) - Expect(lr.EndpointID.GetWorkloadId()).To(Equal(WorkloadID)) + Expect(lr.EndpointID.WorkloadId).To(Equal(WorkloadID)) close(updates) <-syncDone close(done) @@ -116,7 +116,7 @@ var _ = Describe("Server", func() { } j := <-joins lr := j.(policysync.LeaveRequest) - Expect(lr.EndpointID.GetWorkloadId()).To(Equal(WorkloadID)) + Expect(lr.EndpointID.WorkloadId).To(Equal(WorkloadID)) <-syncDone close(done) }) diff --git a/felix/rules/dispatch.go b/felix/rules/dispatch.go index c99a16d8594..7ac762688a2 100644 --- a/felix/rules/dispatch.go +++ b/felix/rules/dispatch.go @@ -22,10 +22,11 @@ import ( . "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" "github.com/projectcalico/calico/felix/stringutils" + "github.com/projectcalico/calico/felix/types" ) func (r *DefaultRuleRenderer) WorkloadDispatchChains( - endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, + endpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint, ) []*Chain { // Extract endpoint names. log.WithField("numEndpoints", len(endpoints)).Debug("Rendering workload dispatch chains") @@ -55,7 +56,7 @@ func (r *DefaultRuleRenderer) WorkloadDispatchChains( } func (r *DefaultRuleRenderer) WorkloadInterfaceAllowChains( - endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, + endpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint, ) []*Chain { // Extract endpoint names. 
log.WithField("numEndpoints", len(endpoints)).Debug("Rendering workload interface allow chain") @@ -103,8 +104,8 @@ func (r *DefaultRuleRenderer) WorkloadInterfaceAllowChains( // a from-endpoint-mark chain to jump to a corresponding endpoint chain matching on its endpoint mark. func (r *DefaultRuleRenderer) EndpointMarkDispatchChains( epMarkMapper EndpointMarkMapper, - wlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, - hepEndpoints map[string]proto.HostEndpointID, + wlEndpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint, + hepEndpoints map[string]types.HostEndpointID, ) []*Chain { // Extract endpoint names. logCxt := log.WithFields(log.Fields{ @@ -134,7 +135,7 @@ func (r *DefaultRuleRenderer) EndpointMarkDispatchChains( } func (r *DefaultRuleRenderer) HostDispatchChains( - endpoints map[string]proto.HostEndpointID, + endpoints map[string]types.HostEndpointID, defaultIfaceName string, applyOnForward bool, ) []*Chain { @@ -143,7 +144,7 @@ func (r *DefaultRuleRenderer) HostDispatchChains( // For pre-DNAT policy, which only applies on ingress from a host endpoint. func (r *DefaultRuleRenderer) FromHostDispatchChains( - endpoints map[string]proto.HostEndpointID, + endpoints map[string]types.HostEndpointID, defaultIfaceName string, ) []*Chain { return r.hostDispatchChains(endpoints, defaultIfaceName, "from", false) @@ -151,14 +152,14 @@ func (r *DefaultRuleRenderer) FromHostDispatchChains( // For applying normal host endpoint egress policy to traffic from the host which has been DNAT'd. func (r *DefaultRuleRenderer) ToHostDispatchChains( - endpoints map[string]proto.HostEndpointID, + endpoints map[string]types.HostEndpointID, defaultIfaceName string, ) []*Chain { return r.hostDispatchChains(endpoints, defaultIfaceName, "to", false) } func (r *DefaultRuleRenderer) hostDispatchChains( - endpoints map[string]proto.HostEndpointID, + endpoints map[string]types.HostEndpointID, defaultIfaceName string, directions string, applyOnForward bool, diff --git a/felix/rules/dispatch_test.go b/felix/rules/dispatch_test.go index a6bfe86fe43..4752f84d891 100644 --- a/felix/rules/dispatch_test.go +++ b/felix/rules/dispatch_test.go @@ -16,6 +16,7 @@ package rules_test import ( . 
"github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" "fmt" @@ -67,12 +68,12 @@ var _ = Describe("Dispatch chains", func() { }) It("should panic if interface name is empty", func() { - endpointID := proto.WorkloadEndpointID{ + endpointID := types.WorkloadEndpointID{ OrchestratorId: "foobar", WorkloadId: "workload", EndpointId: "noname", } - input := map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{ + input := map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{ endpointID: {}, } Expect(func() { renderer.WorkloadDispatchChains(input) }).To(Panic()) @@ -80,11 +81,11 @@ var _ = Describe("Dispatch chains", func() { DescribeTable("workload rendering tests", func(names []string, expectedChains map[bool][]*iptables.Chain) { - var input map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint + var input map[types.WorkloadEndpointID]*proto.WorkloadEndpoint if names != nil { - input = map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint{} + input = map[types.WorkloadEndpointID]*proto.WorkloadEndpoint{} for i, name := range names { - id := proto.WorkloadEndpointID{ + id := types.WorkloadEndpointID{ OrchestratorId: "foobar", WorkloadId: fmt.Sprintf("workload-%v", i), EndpointId: name, @@ -99,7 +100,7 @@ var _ = Describe("Dispatch chains", func() { var result []*iptables.Chain if kubeIPVSEnabled { result = append(renderer.WorkloadDispatchChains(input), - renderer.EndpointMarkDispatchChains(epMarkMapper, input, map[string]proto.HostEndpointID{})...) + renderer.EndpointMarkDispatchChains(epMarkMapper, input, map[string]types.HostEndpointID{})...) } else { result = renderer.WorkloadDispatchChains(input) } @@ -495,12 +496,12 @@ var _ = Describe("Dispatch chains", func() { ) Describe("host endpoint rendering tests", func() { - convertToInput := func(names []string, expectedChains []*iptables.Chain) map[string]proto.HostEndpointID { - var input map[string]proto.HostEndpointID + convertToInput := func(names []string, expectedChains []*iptables.Chain) map[string]types.HostEndpointID { + var input map[string]types.HostEndpointID if names != nil { - input = map[string]proto.HostEndpointID{} + input = map[string]types.HostEndpointID{} for _, name := range names { - input[name] = proto.HostEndpointID{} // Data is currently ignored. + input[name] = types.HostEndpointID{} // Data is currently ignored. } } diff --git a/felix/rules/endpoints.go b/felix/rules/endpoints.go index d6322da2568..bf6970de6a7 100644 --- a/felix/rules/endpoints.go +++ b/felix/rules/endpoints.go @@ -25,7 +25,7 @@ import ( "github.com/projectcalico/calico/felix/hashutils" . "github.com/projectcalico/calico/felix/iptables" - "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) const ( @@ -347,7 +347,7 @@ func (r *DefaultRuleRenderer) PolicyGroupToIptablesChains(group *PolicyGroup) [] chainToJumpTo := PolicyChainName( polChainPrefix, - &proto.PolicyID{Name: polName}, + &types.PolicyID{Name: polName}, ) rules = append(rules, Rule{ Match: match, @@ -424,7 +424,7 @@ func (r *DefaultRuleRenderer) endpointIptablesChain(policyGroups []*PolicyGroup, for _, p := range polGroup.PolicyNames { chainsToJumpTo = append(chainsToJumpTo, PolicyChainName( policyPrefix, - &proto.PolicyID{Name: p}, + &types.PolicyID{Name: p}, )) } } else { @@ -486,7 +486,7 @@ func (r *DefaultRuleRenderer) endpointIptablesChain(policyGroups []*PolicyGroup, if chainType == chainTypeNormal { // Then, jump to each profile in turn. 
for _, profileID := range profileIds { - profChainName := ProfileChainName(profilePrefix, &proto.ProfileID{Name: profileID}) + profChainName := ProfileChainName(profilePrefix, &types.ProfileID{Name: profileID}) rules = append(rules, Rule{Action: JumpAction{Target: profChainName}}, // If policy marked packet as accepted, it returns, setting the diff --git a/felix/rules/policy.go b/felix/rules/policy.go index 1a38f46d709..efd5b2b2573 100644 --- a/felix/rules/policy.go +++ b/felix/rules/policy.go @@ -24,11 +24,12 @@ import ( "github.com/projectcalico/calico/felix/ipsets" "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) // ruleRenderer defined in rules_defs.go. -func (r *DefaultRuleRenderer) PolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain { +func (r *DefaultRuleRenderer) PolicyToIptablesChains(policyID *types.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain { inbound := iptables.Chain{ Name: PolicyChainName(PolicyInboundPfx, policyID), Rules: r.ProtoRulesToIptablesRules(policy.InboundRules, ipVersion, fmt.Sprintf("Policy %s ingress", policyID.Name)), @@ -40,7 +41,7 @@ func (r *DefaultRuleRenderer) PolicyToIptablesChains(policyID *proto.PolicyID, p return []*iptables.Chain{&inbound, &outbound} } -func (r *DefaultRuleRenderer) ProfileToIptablesChains(profileID *proto.ProfileID, profile *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) { +func (r *DefaultRuleRenderer) ProfileToIptablesChains(profileID *types.ProfileID, profile *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) { inbound = &iptables.Chain{ Name: ProfileChainName(ProfileInboundPfx, profileID), Rules: r.ProtoRulesToIptablesRules(profile.InboundRules, ipVersion, fmt.Sprintf("Profile %s ingress", profileID.Name)), @@ -792,7 +793,7 @@ func (r *DefaultRuleRenderer) CalculateRuleMatch(pRule *proto.Rule, ipVersion ui return match } -func PolicyChainName(prefix PolicyChainNamePrefix, polID *proto.PolicyID) string { +func PolicyChainName(prefix PolicyChainNamePrefix, polID *types.PolicyID) string { return hashutils.GetLengthLimitedID( string(prefix), polID.Name, @@ -800,7 +801,7 @@ func PolicyChainName(prefix PolicyChainNamePrefix, polID *proto.PolicyID) string ) } -func ProfileChainName(prefix ProfileChainNamePrefix, profID *proto.ProfileID) string { +func ProfileChainName(prefix ProfileChainNamePrefix, profID *types.ProfileID) string { return hashutils.GetLengthLimitedID( string(prefix), profID.Name, diff --git a/felix/rules/policy_test.go b/felix/rules/policy_test.go index 7e88476c0ab..4381de85f0b 100644 --- a/felix/rules/policy_test.go +++ b/felix/rules/policy_test.go @@ -17,6 +17,7 @@ package rules_test import ( "github.com/projectcalico/calico/felix/environment" . "github.com/projectcalico/calico/felix/rules" + "github.com/projectcalico/calico/felix/types" . "github.com/onsi/ginkgo" . 
"github.com/onsi/ginkgo/extensions/table" @@ -1084,7 +1085,7 @@ var _ = Describe("rule metadata tests", func() { It("should include a chain name comment", func() { renderer := NewRenderer(rrConfigNormal) chains := renderer.PolicyToIptablesChains( - &proto.PolicyID{ + &types.PolicyID{ Name: "long-policy-name-that-gets-hashed", }, &proto.Policy{ @@ -1120,7 +1121,7 @@ var _ = Describe("rule metadata tests", func() { It("should include a chain name comment", func() { renderer := NewRenderer(rrConfigNormal) inbound, outbound := renderer.ProfileToIptablesChains( - &proto.ProfileID{ + &types.ProfileID{ Name: "long-policy-name-that-gets-hashed", }, &proto.Profile{ diff --git a/felix/rules/rule_defs.go b/felix/rules/rule_defs.go index 091156e4102..d8410a1a716 100644 --- a/felix/rules/rule_defs.go +++ b/felix/rules/rule_defs.go @@ -27,6 +27,7 @@ import ( "github.com/projectcalico/calico/felix/ipsets" "github.com/projectcalico/calico/felix/iptables" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" ) const ( @@ -189,21 +190,21 @@ type RuleRenderer interface { StaticMangleTableChains(ipVersion uint8) []*iptables.Chain StaticFilterForwardAppendRules() []iptables.Rule - WorkloadDispatchChains(map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain + WorkloadDispatchChains(map[types.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain WorkloadEndpointToIptablesChains(ifaceName string, epMarkMapper EndpointMarkMapper, adminUp bool, ingressPolicies []*PolicyGroup, egressPolicies []*PolicyGroup, profileIDs []string) []*iptables.Chain PolicyGroupToIptablesChains(group *PolicyGroup) []*iptables.Chain - WorkloadInterfaceAllowChains(endpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain + WorkloadInterfaceAllowChains(endpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint) []*iptables.Chain EndpointMarkDispatchChains( epMarkMapper EndpointMarkMapper, - wlEndpoints map[proto.WorkloadEndpointID]*proto.WorkloadEndpoint, - hepEndpoints map[string]proto.HostEndpointID, + wlEndpoints map[types.WorkloadEndpointID]*proto.WorkloadEndpoint, + hepEndpoints map[string]types.HostEndpointID, ) []*iptables.Chain - HostDispatchChains(map[string]proto.HostEndpointID, string, bool) []*iptables.Chain - FromHostDispatchChains(map[string]proto.HostEndpointID, string) []*iptables.Chain - ToHostDispatchChains(map[string]proto.HostEndpointID, string) []*iptables.Chain + HostDispatchChains(map[string]types.HostEndpointID, string, bool) []*iptables.Chain + FromHostDispatchChains(map[string]types.HostEndpointID, string) []*iptables.Chain + ToHostDispatchChains(map[string]types.HostEndpointID, string) []*iptables.Chain HostEndpointToFilterChains( ifaceName string, epMarkMapper EndpointMarkMapper, @@ -232,8 +233,8 @@ type RuleRenderer interface { preDNATPolicies []*PolicyGroup, ) []*iptables.Chain - PolicyToIptablesChains(policyID *proto.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain - ProfileToIptablesChains(profileID *proto.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) + PolicyToIptablesChains(policyID *types.PolicyID, policy *proto.Policy, ipVersion uint8) []*iptables.Chain + ProfileToIptablesChains(profileID *types.ProfileID, policy *proto.Profile, ipVersion uint8) (inbound, outbound *iptables.Chain) ProtoRuleToIptablesRules(pRule *proto.Rule, ipVersion uint8) []iptables.Rule MakeNatOutgoingRule(protocol string, action iptables.Action, ipVersion uint8) iptables.Rule diff 
--git a/felix/statusrep/status_reporter_test.go b/felix/statusrep/status_reporter_test.go index 6a30fec90b5..9938ace212b 100644 --- a/felix/statusrep/status_reporter_test.go +++ b/felix/statusrep/status_reporter_test.go @@ -27,6 +27,7 @@ import ( "github.com/projectcalico/calico/felix/jitter" "github.com/projectcalico/calico/felix/proto" + "github.com/projectcalico/calico/felix/types" "github.com/projectcalico/calico/libcalico-go/lib/backend/model" calierrors "github.com/projectcalico/calico/libcalico-go/lib/errors" ) @@ -71,24 +72,26 @@ var hostEPDown = model.HostEndpointStatus{ Status: "down", } -var protoWlID = proto.WorkloadEndpointID{ +var wlID = types.WorkloadEndpointID{ OrchestratorId: "orch", WorkloadId: "updatedWL", EndpointId: "updatedEP", } +var protoWlID = types.WorkloadEndpointIDToProto(wlID) + var protoUp = proto.EndpointStatus{Status: "up"} var protoDown = proto.EndpointStatus{Status: "down"} var wlEPUpdateUp = proto.WorkloadEndpointStatusUpdate{ - Id: &protoWlID, + Id: protoWlID, Status: &protoUp, } var wlEPRemove = proto.WorkloadEndpointStatusRemove{ - Id: &protoWlID, + Id: protoWlID, } var wlEPUpdateDown = proto.WorkloadEndpointStatusUpdate{ - Id: &protoWlID, + Id: protoWlID, Status: &protoDown, } var updatedWlEPKey = model.WorkloadEndpointStatusKey{ @@ -106,18 +109,21 @@ var updatedWlEPKeyRegion = model.WorkloadEndpointStatusKey{ RegionString: "region-Europe", } -var protoHostID = proto.HostEndpointID{ +var hostID = types.HostEndpointID{ EndpointId: "updatedEP", } + +var protoHostID = types.HostEndpointIDToProto(hostID) + var hostEPUpdateUp = proto.HostEndpointStatusUpdate{ - Id: &protoHostID, + Id: protoHostID, Status: &protoUp, } var hostEPRemove = proto.HostEndpointStatusRemove{ - Id: &protoHostID, + Id: protoHostID, } var hostEPUpdateDown = proto.HostEndpointStatusUpdate{ - Id: &protoHostID, + Id: protoHostID, Status: &protoDown, } var updatedHostEPKey = model.HostEndpointStatusKey{ diff --git a/felix/types/host_endpoint_id.go b/felix/types/host_endpoint_id.go new file mode 100644 index 00000000000..617f794cdbc --- /dev/null +++ b/felix/types/host_endpoint_id.go @@ -0,0 +1,37 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "github.com/projectcalico/calico/felix/proto" + +type HostEndpointID struct { + EndpointId string +} + +func (h HostEndpointID) String() string { + return h.EndpointId +} + +func ProtoToHostEndpointID(h *proto.HostEndpointID) HostEndpointID { + return HostEndpointID{ + EndpointId: h.GetEndpointId(), + } +} + +func HostEndpointIDToProto(h HostEndpointID) *proto.HostEndpointID { + return &proto.HostEndpointID{ + EndpointId: h.EndpointId, + } +} diff --git a/felix/types/host_endpoint_id_test.go b/felix/types/host_endpoint_id_test.go new file mode 100644 index 00000000000..d9ec8ccf356 --- /dev/null +++ b/felix/types/host_endpoint_id_test.go @@ -0,0 +1,77 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestHostEndpointID_String(t *testing.T) { + tests := []struct { + name string + h HostEndpointID + want string + }{ + {"empty", HostEndpointID{}, ""}, + {"non-empty", HostEndpointID{"foo"}, "foo"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.h.String(); got != tt.want { + t.Errorf("HostEndpointID.String() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestProtoToHostEndpointID(t *testing.T) { + tests := []struct { + name string + h *proto.HostEndpointID + want HostEndpointID + }{ + {"empty", &proto.HostEndpointID{}, HostEndpointID{}}, + {"non-empty", &proto.HostEndpointID{EndpointId: "foo"}, HostEndpointID{"foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToHostEndpointID(tt.h); got != tt.want { + t.Errorf("ProtoToHostEndpointID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestHostEndpointIDToProto(t *testing.T) { + tests := []struct { + name string + h HostEndpointID + want *proto.HostEndpointID + }{ + {"empty", HostEndpointID{}, &proto.HostEndpointID{}}, + {"non-empty", HostEndpointID{"foo"}, &proto.HostEndpointID{EndpointId: "foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := HostEndpointIDToProto(tt.h); !googleproto.Equal(got, tt.want) { + t.Errorf("HostEndpointIDToProto() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/felix/types/namespace_id.go b/felix/types/namespace_id.go new file mode 100644 index 00000000000..540b7b27b34 --- /dev/null +++ b/felix/types/namespace_id.go @@ -0,0 +1,33 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "github.com/projectcalico/calico/felix/proto" + +type NamespaceID struct { + Name string +} + +func ProtoToNamespaceID(n *proto.NamespaceID) NamespaceID { + return NamespaceID{ + Name: n.GetName(), + } +} + +func NamespaceIDToProto(n NamespaceID) *proto.NamespaceID { + return &proto.NamespaceID{ + Name: n.Name, + } +} diff --git a/felix/types/namespace_id_test.go b/felix/types/namespace_id_test.go new file mode 100644 index 00000000000..3d338076fdf --- /dev/null +++ b/felix/types/namespace_id_test.go @@ -0,0 +1,59 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestProtoToNamespaceID(t *testing.T) { + tests := []struct { + name string + n *proto.NamespaceID + want NamespaceID + }{ + {"empty", &proto.NamespaceID{}, NamespaceID{}}, + {"non-empty", &proto.NamespaceID{Name: "foo"}, NamespaceID{"foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToNamespaceID(tt.n); got != tt.want { + t.Errorf("ProtoToNamespaceID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestNamespaceIDToProto(t *testing.T) { + tests := []struct { + name string + n NamespaceID + want *proto.NamespaceID + }{ + {"empty", NamespaceID{}, &proto.NamespaceID{}}, + {"non-empty", NamespaceID{"foo"}, &proto.NamespaceID{Name: "foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := NamespaceIDToProto(tt.n); !googleproto.Equal(got, tt.want) { + t.Errorf("NamespaceIDToProto() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/felix/types/policy_id.go b/felix/types/policy_id.go new file mode 100644 index 00000000000..47947366d4b --- /dev/null +++ b/felix/types/policy_id.go @@ -0,0 +1,44 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "fmt" + + "github.com/projectcalico/calico/felix/proto" +) + +type PolicyID struct { + Tier string + Name string +} + +func (p PolicyID) String() string { + return fmt.Sprintf("{Tier: %s, Name: %s}", p.Tier, p.Name) +} + +func ProtoToPolicyID(p *proto.PolicyID) PolicyID { + return PolicyID{ + Tier: p.GetTier(), + Name: p.GetName(), + } +} + +func PolicyIDToProto(p PolicyID) *proto.PolicyID { + return &proto.PolicyID{ + Tier: p.Tier, + Name: p.Name, + } +} diff --git a/felix/types/policy_id_test.go b/felix/types/policy_id_test.go new file mode 100644 index 00000000000..2158fe7af9b --- /dev/null +++ b/felix/types/policy_id_test.go @@ -0,0 +1,77 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestPolicyID_String(t *testing.T) { + tests := []struct { + name string + p PolicyID + want string + }{ + {"empty", PolicyID{}, "{Tier: , Name: }"}, + {"non-empty", PolicyID{"foo", "bar"}, "{Tier: foo, Name: bar}"}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.p.String(); got != tt.want { + t.Errorf("PolicyID.String() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestProtoToPolicyID(t *testing.T) { + tests := []struct { + name string + p *proto.PolicyID + want PolicyID + }{ + {"empty", nil, PolicyID{}}, + {"non-empty", &proto.PolicyID{Tier: "foo", Name: "bar"}, PolicyID{Tier: "foo", Name: "bar"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToPolicyID(tt.p); got != tt.want { + t.Errorf("ProtoToPolicyID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestPolicyIDToProto(t *testing.T) { + tests := []struct { + name string + p PolicyID + want *proto.PolicyID + }{ + {"empty", PolicyID{}, &proto.PolicyID{}}, + {"non-empty", PolicyID{Tier: "foo", Name: "bar"}, &proto.PolicyID{Tier: "foo", Name: "bar"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := PolicyIDToProto(tt.p); !googleproto.Equal(got, tt.want) { + t.Errorf("PolicyIDToProto() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/felix/types/profile_id.go b/felix/types/profile_id.go new file mode 100644 index 00000000000..df822b8aebc --- /dev/null +++ b/felix/types/profile_id.go @@ -0,0 +1,33 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import "github.com/projectcalico/calico/felix/proto" + +type ProfileID struct { + Name string +} + +func ProtoToProfileID(p *proto.ProfileID) ProfileID { + return ProfileID{ + Name: p.GetName(), + } +} + +func ProfileIDToProto(p ProfileID) *proto.ProfileID { + return &proto.ProfileID{ + Name: p.Name, + } +} diff --git a/felix/types/profile_id_test.go b/felix/types/profile_id_test.go new file mode 100644 index 00000000000..fb8df9c8f06 --- /dev/null +++ b/felix/types/profile_id_test.go @@ -0,0 +1,59 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestProtoToProfileID(t *testing.T) { + tests := []struct { + name string + p *proto.ProfileID + want ProfileID + }{ + {"empty", &proto.ProfileID{}, ProfileID{}}, + {"non-empty", &proto.ProfileID{Name: "foo"}, ProfileID{"foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToProfileID(tt.p); got != tt.want { + t.Errorf("ProtoToProfileID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestProfileIDToProto(t *testing.T) { + tests := []struct { + name string + p ProfileID + want *proto.ProfileID + }{ + {"empty", ProfileID{}, &proto.ProfileID{}}, + {"non-empty", ProfileID{"foo"}, &proto.ProfileID{Name: "foo"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProfileIDToProto(tt.p); !googleproto.Equal(got, tt.want) { + t.Errorf("ProfileIDToProto() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/felix/types/service_account_id.go b/felix/types/service_account_id.go new file mode 100644 index 00000000000..9321cfae323 --- /dev/null +++ b/felix/types/service_account_id.go @@ -0,0 +1,38 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/projectcalico/calico/felix/proto" +) + +type ServiceAccountID struct { + Namespace string + Name string +} + +func ProtoToServiceAccountID(s *proto.ServiceAccountID) ServiceAccountID { + return ServiceAccountID{ + Namespace: s.GetNamespace(), + Name: s.GetName(), + } +} + +func ServiceAccountIDToProto(s ServiceAccountID) *proto.ServiceAccountID { + return &proto.ServiceAccountID{ + Namespace: s.Namespace, + Name: s.Name, + } +} diff --git a/felix/types/service_account_id_test.go b/felix/types/service_account_id_test.go new file mode 100644 index 00000000000..b964bf1d0b2 --- /dev/null +++ b/felix/types/service_account_id_test.go @@ -0,0 +1,63 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestProtoToServiceAccountID(t *testing.T) { + tests := []struct { + name string + s *proto.ServiceAccountID + want ServiceAccountID + }{ + {"empty", &proto.ServiceAccountID{}, ServiceAccountID{}}, + {"non-empty", + &proto.ServiceAccountID{Namespace: "foo", Name: "bar"}, + ServiceAccountID{Namespace: "foo", Name: "bar"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToServiceAccountID(tt.s); got != tt.want { + t.Errorf("ProtoToServiceAccountID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestServiceAccountIDToProto(t *testing.T) { + tests := []struct { + name string + s ServiceAccountID + want *proto.ServiceAccountID + }{ + {"empty", ServiceAccountID{}, &proto.ServiceAccountID{}}, + {"non-empty", + ServiceAccountID{Namespace: "foo", Name: "bar"}, + &proto.ServiceAccountID{Namespace: "foo", Name: "bar"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ServiceAccountIDToProto(tt.s); !googleproto.Equal(got, tt.want) { + t.Errorf("ServiceAccountIDToProto() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/felix/types/workload_endpoint_id.go b/felix/types/workload_endpoint_id.go new file mode 100644 index 00000000000..f94c86ee918 --- /dev/null +++ b/felix/types/workload_endpoint_id.go @@ -0,0 +1,41 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "github.com/projectcalico/calico/felix/proto" +) + +type WorkloadEndpointID struct { + OrchestratorId string + WorkloadId string + EndpointId string +} + +func ProtoToWorkloadEndpointID(w *proto.WorkloadEndpointID) WorkloadEndpointID { + return WorkloadEndpointID{ + OrchestratorId: w.GetOrchestratorId(), + WorkloadId: w.GetWorkloadId(), + EndpointId: w.GetEndpointId(), + } +} + +func WorkloadEndpointIDToProto(w WorkloadEndpointID) *proto.WorkloadEndpointID { + return &proto.WorkloadEndpointID{ + OrchestratorId: w.OrchestratorId, + WorkloadId: w.WorkloadId, + EndpointId: w.EndpointId, + } +} diff --git a/felix/types/workload_endpoint_id_test.go b/felix/types/workload_endpoint_id_test.go new file mode 100644 index 00000000000..c43157d32cc --- /dev/null +++ b/felix/types/workload_endpoint_id_test.go @@ -0,0 +1,63 @@ +// Copyright (c) 2024 Tigera, Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package types + +import ( + "testing" + + googleproto "google.golang.org/protobuf/proto" + + "github.com/projectcalico/calico/felix/proto" +) + +func TestProtoToWorkloadEndpointID(t *testing.T) { + tests := []struct { + name string + w *proto.WorkloadEndpointID + want WorkloadEndpointID + }{ + {"empty", &proto.WorkloadEndpointID{}, WorkloadEndpointID{}}, + {"non-empty", + &proto.WorkloadEndpointID{OrchestratorId: "oid", WorkloadId: "wid", EndpointId: "eid"}, + WorkloadEndpointID{OrchestratorId: "oid", WorkloadId: "wid", EndpointId: "eid"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ProtoToWorkloadEndpointID(tt.w); got != tt.want { + t.Errorf("ProtoToWorkloadEndpointID() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestWorkloadEndpointIDToProto(t *testing.T) { + tests := []struct { + name string + w WorkloadEndpointID + want *proto.WorkloadEndpointID + }{ + {"empty", WorkloadEndpointID{}, &proto.WorkloadEndpointID{}}, + {"non-empty", + WorkloadEndpointID{OrchestratorId: "oid", WorkloadId: "wid", EndpointId: "eid"}, + &proto.WorkloadEndpointID{OrchestratorId: "oid", WorkloadId: "wid", EndpointId: "eid"}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := WorkloadEndpointIDToProto(tt.w); !googleproto.Equal(got, tt.want) { + t.Errorf("WorkloadEndpointIDToProto() = %v, want %v", got, tt.want) + } + }) + } +}
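Taken together, the files above replace the generated protobuf ID messages with plain, comparable structs in felix/types, plus ProtoTo*/​*ToProto helpers for converting at the boundary where proto messages are produced or consumed. The sketch below is a minimal, illustrative example and is not part of the change itself; it assumes the felix/types and felix/proto packages as defined in this diff, and the tier, policy, chain, and workload names in it are made up for demonstration.

    package main

    import (
        "fmt"

        googleproto "google.golang.org/protobuf/proto"

        "github.com/projectcalico/calico/felix/proto"
        "github.com/projectcalico/calico/felix/types"
    )

    func main() {
        // types.PolicyID is a plain struct, so it supports == and can key a map
        // with value-equality semantics (unlike the pointer-handled proto message).
        chains := map[types.PolicyID]string{}
        id := types.PolicyID{Tier: "default", Name: "allow-dns"} // made-up names
        chains[id] = "cali-pi-example"                           // hypothetical chain name

        // A separately constructed key with the same field values hits the same entry.
        fmt.Println(chains[types.PolicyID{Tier: "default", Name: "allow-dns"}])
        fmt.Println(id.String()) // "{Tier: default, Name: allow-dns}"

        // Convert to the generated proto message at the API boundary and back.
        pb := types.PolicyIDToProto(id)   // *proto.PolicyID
        back := types.ProtoToPolicyID(pb) // comparable struct again
        fmt.Println(back == id)           // true

        // Generated proto messages are compared with proto.Equal rather than ==.
        fmt.Println(googleproto.Equal(pb, &proto.PolicyID{Tier: "default", Name: "allow-dns"}))

        // Workload endpoint IDs follow the same round-trip pattern.
        wep := types.WorkloadEndpointID{OrchestratorId: "k8s", WorkloadId: "ns/pod", EndpointId: "eth0"}
        fmt.Println(types.ProtoToWorkloadEndpointID(types.WorkloadEndpointIDToProto(wep)) == wep) // true
    }

The value-type structs keep map and set keys, equality checks, and log output cheap and deterministic, while the conversion helpers confine knowledge of the protobuf representation to the points where updates enter or leave the calculation graph, rule renderer, and status reporter.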