Pass the whole VPA into cappingRecommendationProcessor.Apply() #7527

Merged: 6 commits, Dec 11, 2024
@@ -116,7 +116,7 @@ func (p *recommendationProvider) GetContainersResourcesForPod(pod *core.Pod, vpa

 	if vpa.Status.Recommendation != nil {
 		var err error
-		recommendedPodResources, annotations, err = p.recommendationProcessor.Apply(vpa.Status.Recommendation, vpa.Spec.ResourcePolicy, vpa.Status.Conditions, pod)
+		recommendedPodResources, annotations, err = p.recommendationProcessor.Apply(vpa, pod)
 		if err != nil {
 			klog.V(2).InfoS("Cannot process recommendation for pod", "pod", klog.KObj(pod))
 			return nil, annotations, err
@@ -81,7 +81,7 @@ func NewUpdatePriorityCalculator(vpa *vpa_types.VerticalPodAutoscaler,

 // AddPod adds pod to the UpdatePriorityCalculator.
 func (calc *UpdatePriorityCalculator) AddPod(pod *apiv1.Pod, now time.Time) {
-	processedRecommendation, _, err := calc.recommendationProcessor.Apply(calc.vpa.Status.Recommendation, calc.vpa.Spec.ResourcePolicy, calc.vpa.Status.Conditions, pod)
+	processedRecommendation, _, err := calc.recommendationProcessor.Apply(calc.vpa, pod)
 	if err != nil {
 		klog.V(2).ErrorS(err, "Cannot process recommendation for pod", "pod", klog.KObj(pod))
 		return
10 changes: 3 additions & 7 deletions vertical-pod-autoscaler/pkg/utils/test/test_utils.go
@@ -219,9 +219,7 @@ type RecommendationProcessorMock struct {
 }

 // Apply is a mock implementation of RecommendationProcessor.Apply
-func (m *RecommendationProcessorMock) Apply(podRecommendation *vpa_types.RecommendedPodResources,
-	policy *vpa_types.PodResourcePolicy,
-	conditions []vpa_types.VerticalPodAutoscalerCondition,
+func (m *RecommendationProcessorMock) Apply(vpa *vpa_types.VerticalPodAutoscaler,
 	pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, map[string][]string, error) {
 	args := m.Called()
 	var returnArg *vpa_types.RecommendedPodResources
@@ -239,11 +237,9 @@ func (m *RecommendationProcessorMock) Apply(podRecommendation *vpa_types.Recomme
 type FakeRecommendationProcessor struct{}

 // Apply is a dummy implementation of RecommendationProcessor.Apply which returns provided podRecommendation
-func (f *FakeRecommendationProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
-	policy *vpa_types.PodResourcePolicy,
-	conditions []vpa_types.VerticalPodAutoscalerCondition,
+func (f *FakeRecommendationProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
 	pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, map[string][]string, error) {
-	return podRecommendation, nil, nil
+	return vpa.Status.Recommendation, nil, nil
 }

 // fakeEventRecorder is a dummy implementation of record.EventRecorder.
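One knock-on effect for test authors: FakeRecommendationProcessor no longer receives a standalone recommendation, so fixtures must carry it on the VPA's status. A small sketch using the builders from this package (the recommendation and pod values, and calling the fake from outside its package as test.FakeRecommendationProcessor, are assumptions for illustration):

	// Put the recommendation where the fake now looks for it.
	vpa := test.VerticalPodAutoscaler().WithContainer("ctr-name").Get()
	vpa.Status.Recommendation = &recommendation // previously passed to Apply directly

	processed, _, _ := (&test.FakeRecommendationProcessor{}).Apply(vpa, pod)
	// processed is vpa.Status.Recommendation, returned unchanged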
16 changes: 12 additions & 4 deletions vertical-pod-autoscaler/pkg/utils/vpa/capping.go
@@ -52,12 +52,20 @@ type cappingRecommendationProcessor struct {

 // Apply returns a recommendation for the given pod, adjusted to obey policy and limits.
 func (c *cappingRecommendationProcessor) Apply(
-	podRecommendation *vpa_types.RecommendedPodResources,
-	policy *vpa_types.PodResourcePolicy,
-	conditions []vpa_types.VerticalPodAutoscalerCondition,
+	vpa *vpa_types.VerticalPodAutoscaler,
 	pod *apiv1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
 	// TODO: Annotate if request enforced by maintaining proportion with limit and allowed limit range is in conflict with policy.

+	if vpa == nil {
voelzmo (Contributor) commented on Dec 3, 2024:

Sorry for the back-and-forth, but should we really add these nil checks? You're changing the signature from explicitly passing the recommendations, policy, and conditions to passing an entire VPA object, so the context where all this is currently called from already ensures that the VPA and Pod aren't nil: https://github.com/kubernetes/autoscaler/pull/7527/files#diff-b21ffa4a9ddd85d3bdd971cf8ff4cdb5050b05f8c349137a3f7e3152d4634d6eR108-R110

Author (Member) replied:

That is something I had considered, but I landed on the fact that this function should be able to handle any possible input.

I don't actually know if there's a Go or Kubernetes best practice around this. I figured that more defensive code is a good thing. I'm happy to change it, but I'll wait for consensus first.

Member replied:

In my opinion, since this is an exported function, we should include the nil checks to prevent panics from invalid usage. That said, I agree we're only changing the signature, so it's unlikely the VPA object would ever be nil in current usage patterns. I'm comfortable with whatever you decide.

Contributor replied:

+1 for the defensive approach here.

+		return nil, nil, fmt.Errorf("cannot process nil vpa")
+	}
+	if pod == nil {
+		return nil, nil, fmt.Errorf("cannot process nil pod")
+	}
+
+	policy := vpa.Spec.ResourcePolicy
+	podRecommendation := vpa.Status.Recommendation
+
 	if podRecommendation == nil && policy == nil {
 		// If there is no recommendation and no policies have been defined then no recommendation can be computed.
 		return nil, nil, nil
@@ -76,7 +84,7 @@ func (c *cappingRecommendationProcessor) Apply(
 		container := getContainer(containerRecommendation.ContainerName, pod)

 		if container == nil {
-			klog.V(2).InfoS("No matching Container found for recommendation", "containerName", containerRecommendation.ContainerName)
+			klog.V(2).InfoS("No matching Container found for recommendation", "containerName", containerRecommendation.ContainerName, "vpa", klog.KObj(vpa))
 			continue
 		}
96 changes: 75 additions & 21 deletions vertical-pod-autoscaler/pkg/utils/vpa/capping_test.go
@@ -26,29 +26,47 @@ import (
 	"k8s.io/autoscaler/vertical-pod-autoscaler/pkg/utils/test"
 )

+func TestApplyWithNilVPA(t *testing.T) {
+	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
+	processor := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{})
+
+	res, annotations, err := processor.Apply(nil, pod)
+	assert.Error(t, err)
+	assert.Nil(t, res)
+	assert.Nil(t, annotations)
+}
+
+func TestApplyWithNilPod(t *testing.T) {
+	vpa := test.VerticalPodAutoscaler().WithContainer("container").Get()
+	processor := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{})
+
+	res, annotations, err := processor.Apply(vpa, nil)
+	assert.Error(t, err)
+	assert.Nil(t, res)
+	assert.Nil(t, annotations)
+}

 func TestRecommendationNotAvailable(t *testing.T) {
 	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
-	podRecommendation := vpa_types.RecommendedPodResources{
-		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
-			{
-				ContainerName: "ctr-name-other",
-				Target: apiv1.ResourceList{
-					apiv1.ResourceCPU:    *resource.NewScaledQuantity(100, 1),
-					apiv1.ResourceMemory: *resource.NewScaledQuantity(50000, 1),
-				},
-			},
-		},
-	}
-	policy := vpa_types.PodResourcePolicy{}
-
-	res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
+	containerName := "ctr-name-other"
+	vpa := test.VerticalPodAutoscaler().
+		WithContainer(containerName).
+		AppendRecommendation(
+			test.Recommendation().
+				WithContainer(containerName).
+				WithTarget("100m", "50000Mi").
+				GetContainerResources()).
+		Get()
+
+	res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
 	assert.Nil(t, err)
 	assert.Empty(t, annotations)
 	assert.Empty(t, res.ContainerRecommendations)
 }

 func TestRecommendationToLimitCapping(t *testing.T) {
-	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
+	containerName := "ctr-name"
+	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName(containerName).Get()).Get()
 	pod.Spec.Containers[0].Resources.Limits =
 		apiv1.ResourceList{
 			apiv1.ResourceCPU: *resource.NewScaledQuantity(3, 1),
@@ -69,6 +87,10 @@ func TestRecommendationToLimitCapping(t *testing.T) {
 			},
 		},
 	}
+
+	vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
+	vpa.Status.Recommendation = &podRecommendation
+
 	requestsAndLimits := vpa_types.ContainerControlledValuesRequestsAndLimits
 	requestsOnly := vpa_types.ContainerControlledValuesRequestsOnly
 	for _, tc := range []struct {
@@ -125,7 +147,8 @@ func TestRecommendationToLimitCapping(t *testing.T) {
 		},
 	} {
 		t.Run(tc.name, func(t *testing.T) {
-			res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &tc.policy, nil, pod)
+			vpa.Spec.ResourcePolicy = &tc.policy
+			res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
 			assert.Nil(t, err)
 			assert.Equal(t, tc.expectedTarget, res.ContainerRecommendations[0].Target)

@@ -179,7 +202,14 @@ func TestRecommendationCappedToMinMaxPolicy(t *testing.T) {
 		},
 	}

-	res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(&podRecommendation, &policy, nil, pod)
+	containerName := "ctr-name"
+	vpa := test.VerticalPodAutoscaler().
+		WithContainer(containerName).
+		Get()
+	vpa.Spec.ResourcePolicy = &policy
+	vpa.Status.Recommendation = &podRecommendation
+
+	res, annotations, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(vpa, pod)
 	assert.Nil(t, err)
 	assert.Equal(t, apiv1.ResourceList{
 		apiv1.ResourceCPU: *resource.NewScaledQuantity(40, 1),
@@ -238,11 +268,16 @@ var applyTestCases = []struct {
}

 func TestApply(t *testing.T) {
-	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName("ctr-name").Get()).Get()
+	containerName := "ctr-name"
+	pod := test.Pod().WithName("pod1").AddContainer(test.Container().WithName(containerName).Get()).Get()

 	for _, testCase := range applyTestCases {
+		vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
+		vpa.Spec.ResourcePolicy = testCase.Policy
+		vpa.Status.Recommendation = testCase.PodRecommendation
 		res, _, err := NewCappingRecommendationProcessor(&fakeLimitRangeCalculator{}).Apply(
-			testCase.PodRecommendation, testCase.Policy, nil, pod)
+			vpa, pod)
 		assert.Equal(t, testCase.ExpectedPodRecommendation, res)
 		assert.Equal(t, testCase.ExpectedError, err)
 	}
@@ -334,6 +369,12 @@ func TestApplyCapsToLimitRange(t *testing.T) {
 			apiv1.ResourceMemory: resource.MustParse("500M"),
 		},
 	}
+
+	containerName := "container"
+	vpa := test.VerticalPodAutoscaler().
+		WithContainer(containerName).
+		Get()
+
 	recommendation := vpa_types.RecommendedPodResources{
 		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
 			{
@@ -345,6 +386,8 @@ func TestApplyCapsToLimitRange(t *testing.T) {
 			},
 		},
 	}
+	vpa.Status.Recommendation = &recommendation
+
 	pod := apiv1.Pod{
 		Spec: apiv1.PodSpec{
 			Containers: []apiv1.Container{
@@ -378,7 +421,7 @@ func TestApplyCapsToLimitRange(t *testing.T) {

 	calculator := fakeLimitRangeCalculator{containerLimitRange: limitRange}
 	processor := NewCappingRecommendationProcessor(&calculator)
-	processedRecommendation, annotations, err := processor.Apply(&recommendation, nil, nil, &pod)
+	processedRecommendation, annotations, err := processor.Apply(vpa, &pod)
 	assert.NoError(t, err)
 	assert.Contains(t, annotations, "container")
 	assert.ElementsMatch(t, []string{"cpu capped to fit Max in container LimitRange", "memory capped to fit Min in container LimitRange"}, annotations["container"])
@@ -1144,7 +1187,13 @@ func TestApplyLimitRangeMinToRequest(t *testing.T) {
 				podLimitRange: tc.podLimitRange,
 			}
 			processor := NewCappingRecommendationProcessor(&calculator)
-			processedRecommendation, annotations, err := processor.Apply(&tc.resources, tc.policy, nil, &tc.pod)
+
+			containerName := "ctr-name"
+			vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
+			vpa.Spec.ResourcePolicy = tc.policy
+			vpa.Status.Recommendation = &tc.resources
+
+			processedRecommendation, annotations, err := processor.Apply(vpa, &tc.pod)
 			assert.NoError(t, err)
 			for containerName, expectedAnnotations := range tc.expectAnnotations {
 				if assert.Contains(t, annotations, containerName) {
@@ -1270,7 +1319,12 @@ func TestCapPodMemoryWithUnderByteSplit(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			calculator := fakeLimitRangeCalculator{podLimitRange: tc.limitRange}
 			processor := NewCappingRecommendationProcessor(&calculator)
-			processedRecommendation, _, err := processor.Apply(&recommendation, nil, nil, &pod)
+
+			containerName := "ctr-name"
+			vpa := test.VerticalPodAutoscaler().WithContainer(containerName).Get()
+			vpa.Status.Recommendation = &recommendation
+
+			processedRecommendation, _, err := processor.Apply(vpa, &pod)
 			assert.NoError(t, err)
 			assert.Equal(t, tc.expectedRecommendation, *processedRecommendation)
 		})
@@ -17,7 +17,7 @@ limitations under the License.
 package api

 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 )

@@ -29,8 +29,6 @@ type RecommendationProcessor interface {
 	// Apply processes and updates recommendation for given pod, based on container limits,
 	// VPA policy and possibly other internal RecommendationProcessor context.
 	// Must return a non-nil pointer to RecommendedPodResources or error.
-	Apply(podRecommendation *vpa_types.RecommendedPodResources,
-		policy *vpa_types.PodResourcePolicy,
-		conditions []vpa_types.VerticalPodAutoscalerCondition,
+	Apply(vpa *vpa_types.VerticalPodAutoscaler,
 		pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error)
 }
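For anyone implementing RecommendationProcessor outside this repository, the migration is mechanical: the recommendation, resource policy, and conditions are no longer passed as separate arguments but read off the VPA object. A minimal sketch against the new interface (the passthroughProcessor name is illustrative, not part of this PR):

	package api

	import (
		v1 "k8s.io/api/core/v1"
		vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
	)

	// passthroughProcessor returns the recommendation unchanged; a real
	// processor would adjust it here.
	type passthroughProcessor struct{}

	// Apply reads everything it needs from the VPA object itself:
	//   podRecommendation -> vpa.Status.Recommendation
	//   policy            -> vpa.Spec.ResourcePolicy
	//   conditions        -> vpa.Status.Conditions
	func (p *passthroughProcessor) Apply(
		vpa *vpa_types.VerticalPodAutoscaler,
		pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
		return vpa.Status.Recommendation, nil, nil
	}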
23 changes: 17 additions & 6 deletions vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor.go
@@ -17,7 +17,9 @@ limitations under the License.
 package api

 import (
-	"k8s.io/api/core/v1"
+	"fmt"
+
+	v1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"
 )

@@ -31,19 +33,28 @@ type sequentialRecommendationProcessor struct {
 }

 // Apply chains calls to underlying RecommendationProcessors in order provided on object construction
-func (p *sequentialRecommendationProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
-	policy *vpa_types.PodResourcePolicy,
-	conditions []vpa_types.VerticalPodAutoscalerCondition,
+func (p *sequentialRecommendationProcessor) Apply(
+	vpa *vpa_types.VerticalPodAutoscaler,
 	pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
-	recommendation := podRecommendation
Member commented:

Same for here (not sure if it's needed, just my opinion):

	if vpa == nil {
		return nil, nil, fmt.Errorf("cannot process nil vpa")
	}
	if vpa.Status.Recommendation == nil {
		return nil, nil, nil
	}

Author (Member) replied:

Should be fixed in 13d6ffa

Contributor commented:

Should we check pod here as well?

Author (Member) replied:

I thought about adding a check there, but all this function does is pass the Pod on to another function call. Since it doesn't actually do anything with the Pod itself, I figured the guards belong in the functions that actually use it.

+	if vpa == nil {
+		return nil, nil, fmt.Errorf("cannot process nil vpa")
+	}
+	if vpa.Status.Recommendation == nil {
+		return nil, nil, nil
Contributor commented:

I actually wonder if this should be an error case...

The processors are generally "adjusting" the recommendation one at a time. If there is no recommendation to adjust, then this feels like an error case to me.

@omerap12 or @adrianmoisey, are you seeing a case where this should be allowed?

Contributor replied:

Oh wait, I see that this is being done in the capping processor as well. Perhaps it is best to be consistent. However, I wonder if this can eventually be made more strict and turned into an error.

Author (Member) replied:

I am assuming that this case should never happen.

+	}
+
+	recommendation := vpa.Status.Recommendation

 	accumulatedContainerToAnnotationsMap := ContainerToAnnotationsMap{}

 	for _, processor := range p.processors {
 		var (
 			err                       error
 			containerToAnnotationsMap ContainerToAnnotationsMap
 		)
-		recommendation, containerToAnnotationsMap, err = processor.Apply(recommendation, policy, conditions, pod)
+		recommendation, containerToAnnotationsMap, err = processor.Apply(vpa, pod)
+		vpa.Status.Recommendation = recommendation

 		for container, newAnnotations := range containerToAnnotationsMap {
 			annotations, found := accumulatedContainerToAnnotationsMap[container]
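Worth noting how the chaining now works: because each underlying processor reads its input from the VPA, the sequential processor writes every intermediate result back into vpa.Status.Recommendation before calling the next stage, so Apply mutates the VPA it is given. A rough caller-side sketch under that contract (the processor list and the limitRangeCalculator, otherProcessor, vpa, and pod variables are assumptions for illustration):

	// Chain the real processors; each stage adjusts the previous stage's result.
	processor := NewSequentialProcessor([]RecommendationProcessor{
		NewCappingRecommendationProcessor(limitRangeCalculator), // from this package
		otherProcessor, // hypothetical second stage
	})

	// The VPA supplies the recommendation, policy, and conditions;
	// the pod supplies the containers to match against.
	recommendation, annotations, err := processor.Apply(vpa, pod)
	if err != nil {
		return err
	}
	// annotations merges the per-container annotations emitted by every stage;
	// vpa.Status.Recommendation now holds the same final value as recommendation.
	_ = recommendation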
25 changes: 14 additions & 11 deletions vertical-pod-autoscaler/pkg/utils/vpa/sequential_processor_test.go
@@ -19,7 +19,7 @@ package api
 import (
 	"testing"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	vpa_types "k8s.io/autoscaler/vertical-pod-autoscaler/pkg/apis/autoscaling.k8s.io/v1"

 	"github.com/stretchr/testify/assert"
@@ -29,11 +29,9 @@ type fakeProcessor struct {
 	message string
 }

-func (p *fakeProcessor) Apply(podRecommendation *vpa_types.RecommendedPodResources,
-	policy *vpa_types.PodResourcePolicy,
-	conditions []vpa_types.VerticalPodAutoscalerCondition,
+func (p *fakeProcessor) Apply(vpa *vpa_types.VerticalPodAutoscaler,
 	pod *v1.Pod) (*vpa_types.RecommendedPodResources, ContainerToAnnotationsMap, error) {
-	result := podRecommendation.DeepCopy()
+	result := vpa.Status.Recommendation
 	result.ContainerRecommendations[0].ContainerName += p.message
 	containerToAnnotationsMap := ContainerToAnnotationsMap{"trace": []string{p.message}}
 	return result, containerToAnnotationsMap, nil
@@ -43,13 +41,18 @@ func TestSequentialProcessor(t *testing.T) {
 	name1 := "processor1"
 	name2 := "processor2"
 	tested := NewSequentialProcessor([]RecommendationProcessor{&fakeProcessor{name1}, &fakeProcessor{name2}})
-	rec1 := &vpa_types.RecommendedPodResources{
-		ContainerRecommendations: []vpa_types.RecommendedContainerResources{
-			{
-				ContainerName: "",
+	vpa1 := &vpa_types.VerticalPodAutoscaler{
+		Status: vpa_types.VerticalPodAutoscalerStatus{
+			Recommendation: &vpa_types.RecommendedPodResources{
+				ContainerRecommendations: []vpa_types.RecommendedContainerResources{
+					{
+						ContainerName: "",
+					},
+				},
 			},
 		},
 	}
-	result, annotations, _ := tested.Apply(rec1, nil, nil, nil)
+	result, annotations, _ := tested.Apply(vpa1, nil)
 	assert.Equal(t, name1+name2, result.ContainerRecommendations[0].ContainerName)
 	assert.Contains(t, annotations, "trace")
 	assert.Contains(t, annotations["trace"], name1)