diff --git a/CHANGELOG.md b/CHANGELOG.md
index 889779c1ea..1c4a74290a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,21 @@
 # Changelog
 
+### [2.0.1](https://github.com/kubeflow/pipelines/compare/2.0.0...2.0.1) (2023-08-17)
+
+
+### Bug Fixes
+
+* **backend:** Fix performance issue within a mysql request ([\#9680](https://github.com/kubeflow/pipelines/issues/9680)) ([81618d0](https://github.com/kubeflow/pipelines/commit/81618d0fd6810560e0b78c61776d73042bd6f3bb))
+* **backend:** fix timeouts with list run api. Fixes [\#9780](https://github.com/kubeflow/pipelines/issues/9780) ([\#9806](https://github.com/kubeflow/pipelines/issues/9806)) ([c467ece](https://github.com/kubeflow/pipelines/commit/c467ece30551046fa0304a6a7067d3e185d7cf14))
+* **frontend:** Introduce ALLOWED_ARTIFACT_DOMAIN_REGEX flag to prevent accessing undesired domains. Remove user input string from server response. ([\#9844](https://github.com/kubeflow/pipelines/issues/9844)) ([737c0cc](https://github.com/kubeflow/pipelines/commit/737c0cc12606da3994e978678ace7adb1b309944))
+
+
+### Other Pull Requests
+
+* Fix Persistence Agent SA Token time interval ([\#9892](https://github.com/kubeflow/pipelines/issues/9892)) ([681c46f](https://github.com/kubeflow/pipelines/commit/681c46f62bb1d3aa5e1e4db2a239c7c4dd64881a))
+* feat(backend) Enable auth between persistence agent and pipelineAPI (ReportServer) ([\#9699](https://github.com/kubeflow/pipelines/issues/9699)) ([f232d0b](https://github.com/kubeflow/pipelines/commit/f232d0b3902bf666a2bfdc65ac6f93934e010083))
+* fix(backend) Replace LEFT with INNER JOIN when Archive Experiment ([\#9730](https://github.com/kubeflow/pipelines/issues/9730)) ([5593dee](https://github.com/kubeflow/pipelines/commit/5593dee729b0b9518c1a70dbc3f0052796c4f10a))
+
 ## [2.0.0](https://github.com/kubeflow/pipelines/compare/1.7.0...2.0.0) (2023-06-20)
 
diff --git a/VERSION b/VERSION
index 359a5b952d..10bf840ed5 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-2.0.0
\ No newline at end of file
+2.0.1
\ No newline at end of file
diff --git a/backend/api/v1beta1/python_http_client/README.md b/backend/api/v1beta1/python_http_client/README.md
index 2435026978..f0e94be6d2 100644
--- a/backend/api/v1beta1/python_http_client/README.md
+++ b/backend/api/v1beta1/python_http_client/README.md
@@ -3,8 +3,8 @@ This file contains REST API specification for Kubeflow Pipelines. The file is au
 
 This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
 
-- API version: 2.0.0
-- Package version: 2.0.0
+- API version: 2.0.1
+- Package version: 2.0.1
 - Build package: org.openapitools.codegen.languages.PythonClientCodegen
 
 For more information, please visit [https://www.google.com](https://www.google.com)
diff --git a/backend/api/v1beta1/python_http_client/kfp_server_api/__init__.py b/backend/api/v1beta1/python_http_client/kfp_server_api/__init__.py
index f14da081f1..fc1497d659 100644
--- a/backend/api/v1beta1/python_http_client/kfp_server_api/__init__.py
+++ b/backend/api/v1beta1/python_http_client/kfp_server_api/__init__.py
@@ -14,7 +14,7 @@
 
 from __future__ import absolute_import
 
-__version__ = "2.0.0"
+__version__ = "2.0.1"
 
 # import apis into sdk package
 from kfp_server_api.api.experiment_service_api import ExperimentServiceApi
diff --git a/backend/api/v1beta1/python_http_client/kfp_server_api/api_client.py b/backend/api/v1beta1/python_http_client/kfp_server_api/api_client.py
index 2ca5b13e98..5b4cb571de 100644
--- a/backend/api/v1beta1/python_http_client/kfp_server_api/api_client.py
+++ b/backend/api/v1beta1/python_http_client/kfp_server_api/api_client.py
@@ -78,7 +78,7 @@ def __init__(self, configuration=None, header_name=None, header_value=None,
             self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/2.0.0/python'
+        self.user_agent = 'OpenAPI-Generator/2.0.1/python'
         self.client_side_validation = configuration.client_side_validation
 
     def __enter__(self):
diff --git a/backend/api/v1beta1/python_http_client/kfp_server_api/configuration.py b/backend/api/v1beta1/python_http_client/kfp_server_api/configuration.py
index c00241a23c..fe73377512 100644
--- a/backend/api/v1beta1/python_http_client/kfp_server_api/configuration.py
+++ b/backend/api/v1beta1/python_http_client/kfp_server_api/configuration.py
@@ -351,8 +351,8 @@ def to_debug_report(self):
         return "Python SDK Debug Report:\n"\
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
-               "Version of the API: 2.0.0\n"\
-               "SDK Package Version: 2.0.0".\
+               "Version of the API: 2.0.1\n"\
+               "SDK Package Version: 2.0.1".\
                format(env=sys.platform, pyversion=sys.version)
 
     def get_host_settings(self):
diff --git a/backend/api/v1beta1/python_http_client/setup.py b/backend/api/v1beta1/python_http_client/setup.py
index 70cc6616fb..d3fd643008 100644
--- a/backend/api/v1beta1/python_http_client/setup.py
+++ b/backend/api/v1beta1/python_http_client/setup.py
@@ -13,7 +13,7 @@
 from setuptools import setup, find_packages  # noqa: H301
 
 NAME = "kfp-server-api"
-VERSION = "2.0.0"
+VERSION = "2.0.1"
 # To install the library, run the following
 #
 # python setup.py install
diff --git a/backend/api/v1beta1/swagger/kfp_api_single_file.swagger.json b/backend/api/v1beta1/swagger/kfp_api_single_file.swagger.json
index 83bfd2daf3..a4ebf3ca8a 100644
--- a/backend/api/v1beta1/swagger/kfp_api_single_file.swagger.json
+++ b/backend/api/v1beta1/swagger/kfp_api_single_file.swagger.json
@@ -2,7 +2,7 @@
   "swagger": "2.0",
   "info": {
     "title": "Kubeflow Pipelines API",
-    "version": "2.0.0",
+    "version": "2.0.1",
     "description": "This file contains REST API specification for Kubeflow Pipelines. \nThe file is autogenerated from the swagger definition.",
     "contact": {
       "name": "google",
diff --git a/backend/api/v2beta1/python_http_client/README.md b/backend/api/v2beta1/python_http_client/README.md
index 1a10103b5c..dd98e9d6bd 100644
--- a/backend/api/v2beta1/python_http_client/README.md
+++ b/backend/api/v2beta1/python_http_client/README.md
@@ -3,8 +3,8 @@ This file contains REST API specification for Kubeflow Pipelines. The file is au
 
 This Python package is automatically generated by the [OpenAPI Generator](https://openapi-generator.tech) project:
 
-- API version: 2.0.0
-- Package version: 2.0.0
+- API version: 2.0.1
+- Package version: 2.0.1
 - Build package: org.openapitools.codegen.languages.PythonClientCodegen
 
 For more information, please visit [https://www.google.com](https://www.google.com)
diff --git a/backend/api/v2beta1/python_http_client/kfp_server_api/__init__.py b/backend/api/v2beta1/python_http_client/kfp_server_api/__init__.py
index 693e0410dc..87463f0a21 100644
--- a/backend/api/v2beta1/python_http_client/kfp_server_api/__init__.py
+++ b/backend/api/v2beta1/python_http_client/kfp_server_api/__init__.py
@@ -14,7 +14,7 @@
 
 from __future__ import absolute_import
 
-__version__ = "2.0.0"
+__version__ = "2.0.1"
 
 # import apis into sdk package
 from kfp_server_api.api.auth_service_api import AuthServiceApi
diff --git a/backend/api/v2beta1/python_http_client/kfp_server_api/api_client.py b/backend/api/v2beta1/python_http_client/kfp_server_api/api_client.py
index 2ca5b13e98..5b4cb571de 100644
--- a/backend/api/v2beta1/python_http_client/kfp_server_api/api_client.py
+++ b/backend/api/v2beta1/python_http_client/kfp_server_api/api_client.py
@@ -78,7 +78,7 @@ def __init__(self, configuration=None, header_name=None, header_value=None,
             self.default_headers[header_name] = header_value
         self.cookie = cookie
         # Set default User-Agent.
-        self.user_agent = 'OpenAPI-Generator/2.0.0/python'
+        self.user_agent = 'OpenAPI-Generator/2.0.1/python'
         self.client_side_validation = configuration.client_side_validation
 
     def __enter__(self):
diff --git a/backend/api/v2beta1/python_http_client/kfp_server_api/configuration.py b/backend/api/v2beta1/python_http_client/kfp_server_api/configuration.py
index c00241a23c..fe73377512 100644
--- a/backend/api/v2beta1/python_http_client/kfp_server_api/configuration.py
+++ b/backend/api/v2beta1/python_http_client/kfp_server_api/configuration.py
@@ -351,8 +351,8 @@ def to_debug_report(self):
         return "Python SDK Debug Report:\n"\
                "OS: {env}\n"\
                "Python Version: {pyversion}\n"\
-               "Version of the API: 2.0.0\n"\
-               "SDK Package Version: 2.0.0".\
+               "Version of the API: 2.0.1\n"\
+               "SDK Package Version: 2.0.1".\
                format(env=sys.platform, pyversion=sys.version)
 
     def get_host_settings(self):
diff --git a/backend/api/v2beta1/python_http_client/setup.py b/backend/api/v2beta1/python_http_client/setup.py
index 70cc6616fb..d3fd643008 100644
--- a/backend/api/v2beta1/python_http_client/setup.py
+++ b/backend/api/v2beta1/python_http_client/setup.py
@@ -13,7 +13,7 @@
 from setuptools import setup, find_packages  # noqa: H301
 
 NAME = "kfp-server-api"
-VERSION = "2.0.0"
+VERSION = "2.0.1"
 # To install the library, run the following
 #
 # python setup.py install
diff --git a/backend/api/v2beta1/swagger/kfp_api_single_file.swagger.json b/backend/api/v2beta1/swagger/kfp_api_single_file.swagger.json
index ac42a27779..bf218246d0 100644
--- a/backend/api/v2beta1/swagger/kfp_api_single_file.swagger.json
+++ b/backend/api/v2beta1/swagger/kfp_api_single_file.swagger.json
@@ -2,7 +2,7 @@
   "swagger": "2.0",
   "info": {
     "title": "Kubeflow Pipelines API",
-    "version": "2.0.0",
+    "version": "2.0.1",
     "description": "This file contains REST API specification for Kubeflow Pipelines. \nThe file is autogenerated from the swagger definition.",
     "contact": {
       "name": "google",
diff --git a/backend/src/agent/persistence/client/pipeline_client.go b/backend/src/agent/persistence/client/pipeline_client.go
index 994ef56c41..8d0d775bff 100644
--- a/backend/src/agent/persistence/client/pipeline_client.go
+++ b/backend/src/agent/persistence/client/pipeline_client.go
@@ -18,6 +18,7 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"strings"
 	"time"
 
 	"github.com/kubeflow/pipelines/backend/src/apiserver/common"
@@ -47,11 +48,13 @@ type PipelineClient struct {
 	timeout             time.Duration
 	reportServiceClient apiv2.ReportServiceClient
 	runServiceClient    api.RunServiceClient
+	tokenRefresher      TokenRefresherInterface
 }
 
 func NewPipelineClient(
 	initializeTimeout time.Duration,
 	timeout time.Duration,
+	tokenRefresher TokenRefresherInterface,
 	basePath string,
 	mlPipelineServiceName string,
 	mlPipelineServiceHttpPort string,
@@ -73,12 +76,17 @@ func NewPipelineClient(
 		initializeTimeout:   initializeTimeout,
 		timeout:             timeout,
 		reportServiceClient: apiv2.NewReportServiceClient(connection),
+		tokenRefresher:      tokenRefresher,
 		runServiceClient:    api.NewRunServiceClient(connection),
 	}, nil
 }
 
 func (p *PipelineClient) ReportWorkflow(workflow util.ExecutionSpec) error {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	pctx := context.Background()
+	pctx = metadata.AppendToOutgoingContext(pctx, "Authorization",
+		"Bearer "+p.tokenRefresher.GetToken())
+
+	ctx, cancel := context.WithTimeout(pctx, time.Minute)
 	defer cancel()
 
 	_, err := p.reportServiceClient.ReportWorkflow(ctx, &apiv2.ReportWorkflowRequest{
@@ -97,6 +105,15 @@ func (p *PipelineClient) ReportWorkflow(workflow util.ExecutionSpec) error {
 				statusCode.Message(),
 				err.Error(),
 				workflow.ToStringForStore())
+		} else if statusCode.Code() == codes.Unauthenticated && strings.Contains(err.Error(), "service account token has expired") {
+			// If unauthenticated because SA token is expired, re-read/refresh the token and try again
+			p.tokenRefresher.RefreshToken()
+			return util.NewCustomError(err, util.CUSTOM_CODE_TRANSIENT,
+				"Error while reporting workflow resource (code: %v, message: %v): %v, %+v",
+				statusCode.Code(),
+				statusCode.Message(),
+				err.Error(),
+				workflow.ToStringForStore())
 		} else {
 			// Retry otherwise
 			return util.NewCustomError(err, util.CUSTOM_CODE_TRANSIENT,
@@ -111,7 +128,11 @@ func (p *PipelineClient) ReportWorkflow(workflow util.ExecutionSpec) error {
 }
 
 func (p *PipelineClient) ReportScheduledWorkflow(swf *util.ScheduledWorkflow) error {
-	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
+	pctx := context.Background()
+	pctx = metadata.AppendToOutgoingContext(pctx, "Authorization",
+		"Bearer "+p.tokenRefresher.GetToken())
+
+	ctx, cancel := context.WithTimeout(pctx, time.Minute)
 	defer cancel()
 
 	_, err := p.reportServiceClient.ReportScheduledWorkflow(ctx,
@@ -129,6 +150,15 @@ func (p *PipelineClient) ReportScheduledWorkflow(swf *util.ScheduledWorkflow) er
 				statusCode.Message(),
 				err.Error(),
 				swf.ScheduledWorkflow)
+		} else if statusCode.Code() == codes.Unauthenticated && strings.Contains(err.Error(), "service account token has expired") {
+			// If unauthenticated because SA token is expired, re-read/refresh the token and try again
+			p.tokenRefresher.RefreshToken()
+			return util.NewCustomError(err, util.CUSTOM_CODE_TRANSIENT,
+				"Error while reporting workflow resource (code: %v, message: %v): %v, %+v",
+				statusCode.Code(),
+				statusCode.Message(),
+				err.Error(),
+				swf.ScheduledWorkflow)
 		} else {
 			// Retry otherwise
 			return util.NewCustomError(err, util.CUSTOM_CODE_TRANSIENT,
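Both report paths above follow the same pattern: stamp the current service-account token onto the outgoing gRPC context, and when the server answers `Unauthenticated` because the token expired, refresh it and return a transient error so the agent's retry loop picks up the fresh credential. A minimal sketch of that pattern — the `metadata`/`status`/`codes` calls are the real gRPC APIs used by the diff, while `TokenSource`, `staticToken`, and `callWithToken` are hypothetical names for illustration:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
)

// TokenSource is a stand-in for the PR's TokenRefresherInterface.
type TokenSource interface {
	GetToken() string
	RefreshToken() error
}

// callWithToken mirrors the shape of ReportWorkflow/ReportScheduledWorkflow:
// attach the current SA token as a bearer credential, and on an
// Unauthenticated response refresh the token so the caller's retry
// (the error is classified transient) uses a fresh credential.
func callWithToken(ts TokenSource, call func(ctx context.Context) error) error {
	ctx := metadata.AppendToOutgoingContext(context.Background(),
		"Authorization", "Bearer "+ts.GetToken())
	ctx, cancel := context.WithTimeout(ctx, time.Minute)
	defer cancel()

	err := call(ctx)
	if s, ok := status.FromError(err); ok && s.Code() == codes.Unauthenticated {
		// The real client additionally checks the message for
		// "service account token has expired" before refreshing.
		_ = ts.RefreshToken()
	}
	return err
}

type staticToken string

func (t staticToken) GetToken() string    { return string(t) }
func (t staticToken) RefreshToken() error { return nil }

func main() {
	err := callWithToken(staticToken("abc"), func(ctx context.Context) error {
		md, _ := metadata.FromOutgoingContext(ctx)
		fmt.Println(md.Get("authorization")) // [Bearer abc]
		return nil
	})
	fmt.Println(err)
}
```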
diff --git a/backend/src/agent/persistence/client/token_refresher.go b/backend/src/agent/persistence/client/token_refresher.go
new file mode 100644
index 0000000000..addbbb3f54
--- /dev/null
+++ b/backend/src/agent/persistence/client/token_refresher.go
@@ -0,0 +1,78 @@
+package client
+
+import (
+	log "github.com/sirupsen/logrus"
+	"os"
+	"sync"
+	"time"
+)
+
+type TokenRefresherInterface interface {
+	GetToken() string
+	RefreshToken() error
+}
+
+const SaTokenFile = "/var/run/secrets/kubeflow/tokens/persistenceagent-sa-token"
+
+type FileReader interface {
+	ReadFile(filename string) ([]byte, error)
+}
+
+type tokenRefresher struct {
+	mu         sync.RWMutex
+	interval   *time.Duration
+	token      string
+	fileReader *FileReader
+}
+
+type FileReaderImpl struct{}
+
+func (r *FileReaderImpl) ReadFile(filename string) ([]byte, error) {
+	return os.ReadFile(filename)
+}
+
+func NewTokenRefresher(interval time.Duration, fileReader FileReader) *tokenRefresher {
+	if fileReader == nil {
+		fileReader = &FileReaderImpl{}
+	}
+
+	tokenRefresher := &tokenRefresher{
+		interval:   &interval,
+		fileReader: &fileReader,
+	}
+
+	return tokenRefresher
+}
+
+func (tr *tokenRefresher) StartTokenRefreshTicker() error {
+	err := tr.RefreshToken()
+	if err != nil {
+		return err
+	}
+
+	ticker := time.NewTicker(*tr.interval)
+	go func() {
+		for range ticker.C {
+			tr.RefreshToken()
+		}
+	}()
+	return err
+}
+
+func (tr *tokenRefresher) GetToken() string {
+	tr.mu.RLock()
+	defer tr.mu.RUnlock()
+	return tr.token
+}
+
+func (tr *tokenRefresher) RefreshToken() error {
+	tr.mu.Lock()
+	defer tr.mu.Unlock()
+	b, err := (*tr.fileReader).ReadFile(SaTokenFile)
+	if err != nil {
+		log.Errorf("Error reading persistence agent service account token '%s': %v", SaTokenFile, err)
+		return err
+	}
+	tr.token = string(b)
+	return nil
+}
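Condensed from the `main.go` change later in this diff, this is how the refresher is wired: read the projected token once at startup, then re-read it on a ticker; `GetToken()` is what `PipelineClient` stamps onto every report RPC. The hard-coded one-hour interval below stands in for the `-saTokenRefreshIntervalInSecs` flag (whose default is also one hour):

```go
package main

import (
	"log"
	"time"

	"github.com/kubeflow/pipelines/backend/src/agent/persistence/client"
)

func main() {
	// Passing nil selects the os.ReadFile-backed reader, which reads
	// the projected token at SaTokenFile.
	tr := client.NewTokenRefresher(3600*time.Second, nil)
	if err := tr.StartTokenRefreshTicker(); err != nil {
		log.Fatalf("Error starting Service Account Token Refresh Ticker due to: %v", err)
	}
	// tr is then handed to client.NewPipelineClient, which sends
	// "Authorization: Bearer <tr.GetToken()>" with each report RPC.
}
```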
diff --git a/backend/src/agent/persistence/client/token_refresher_test.go b/backend/src/agent/persistence/client/token_refresher_test.go
new file mode 100644
index 0000000000..b6e50d124d
--- /dev/null
+++ b/backend/src/agent/persistence/client/token_refresher_test.go
@@ -0,0 +1,108 @@
+package client
+
+import (
+	"fmt"
+	"io/fs"
+	"log"
+	"syscall"
+	"testing"
+	"time"
+)
+
+const refreshInterval = 2 * time.Second
+
+type FileReaderFake struct {
+	Data        string
+	Err         error
+	readCounter int
+}
+
+func (m *FileReaderFake) ReadFile(filename string) ([]byte, error) {
+	if m.Err != nil {
+		return nil, m.Err
+	}
+	content := fmt.Sprintf("%s-%v", m.Data, m.readCounter)
+	m.readCounter++
+	return []byte(content), nil
+}
+
+func Test_token_refresher(t *testing.T) {
+	tests := []struct {
+		name           string
+		baseToken      string
+		wanted         string
+		refreshedToken string
+		err            error
+	}{
+		{
+			name:      "TestTokenRefresher_GetToken_Success",
+			baseToken: "rightToken",
+			wanted:    "rightToken-0",
+			err:       nil,
+		},
+		{
+			name:      "TestTokenRefresher_GetToken_Failed_PathError",
+			baseToken: "rightToken",
+			wanted:    "rightToken-0",
+			err:       &fs.PathError{Err: syscall.ENOENT},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			// setup
+			fakeFileReader := &FileReaderFake{
+				Data: tt.baseToken,
+				Err:  tt.err,
+			}
+			tr := NewTokenRefresher(refreshInterval, fakeFileReader)
+			err := tr.StartTokenRefreshTicker()
+			if err != nil {
+				got, sameType := err.(*fs.PathError)
+				if sameType != true {
+					t.Errorf("%v(): got = %v, wanted %v", tt.name, got, tt.err)
+				}
+				return
+			}
+
+			if got := tr.GetToken(); got != tt.wanted {
+				t.Errorf("%v(): got %v, wanted %v", tt.name, got, tt.wanted)
+			}
+		})
+	}
+}
+
+func TestTokenRefresher_GetToken_After_TickerRefresh_Success(t *testing.T) {
+	fakeFileReader := &FileReaderFake{
+		Data: "Token",
+		Err:  nil,
+	}
+	tr := NewTokenRefresher(1*time.Second, fakeFileReader)
+	err := tr.StartTokenRefreshTicker()
+	if err != nil {
+		log.Fatalf("Error starting Service Account Token Refresh Ticker: %v", err)
+	}
+	time.Sleep(1200 * time.Millisecond)
+	expectedToken := "Token-1"
+
+	if got := tr.GetToken(); got != expectedToken {
+		t.Errorf("%v(): got %v, wanted 'refreshed baseToken' %v", t.Name(), got, expectedToken)
+	}
+}
+
+func TestTokenRefresher_GetToken_After_ForceRefresh_Success(t *testing.T) {
+	fakeFileReader := &FileReaderFake{
+		Data: "Token",
+		Err:  nil,
+	}
+	tr := NewTokenRefresher(refreshInterval, fakeFileReader)
+	err := tr.StartTokenRefreshTicker()
+	if err != nil {
+		log.Fatalf("Error starting Service Account Token Refresh Ticker: %v", err)
+	}
+	tr.RefreshToken()
+	expectedToken := "Token-1"
+
+	if got := tr.GetToken(); got != expectedToken {
+		t.Errorf("%v(): got %v, wanted 'refreshed baseToken' %v", t.Name(), got, expectedToken)
+	}
+}
diff --git a/backend/src/agent/persistence/main.go b/backend/src/agent/persistence/main.go
index c624f4c8c9..60a6a7f06b 100644
--- a/backend/src/agent/persistence/main.go
+++ b/backend/src/agent/persistence/main.go
@@ -44,6 +44,7 @@ var (
 	clientQPS     float64
 	clientBurst   int
 	executionType string
+	saTokenRefreshIntervalInSecs int64
 )
 
 const (
@@ -61,10 +62,12 @@ const (
 	clientQPSFlagName     = "clientQPS"
 	clientBurstFlagName   = "clientBurst"
 	executionTypeFlagName = "executionType"
+	saTokenRefreshIntervalFlagName = "saTokenRefreshIntervalInSecs"
 )
 
 const (
-	DefaultConnectionTimeout = 6 * time.Minute
+	DefaultConnectionTimeout              = 6 * time.Minute
+	DefaultSATokenRefresherIntervalInSecs = 60 * 60 // 1 Hour in seconds
 )
 
 func main() {
@@ -102,9 +105,16 @@ func main() {
 		Burst: clientBurst,
 	})
 
+	tokenRefresher := client.NewTokenRefresher(time.Duration(saTokenRefreshIntervalInSecs)*time.Second, nil)
+	err = tokenRefresher.StartTokenRefreshTicker()
+	if err != nil {
+		log.Fatalf("Error starting Service Account Token Refresh Ticker due to: %v", err)
+	}
+
 	pipelineClient, err := client.NewPipelineClient(
 		initializeTimeout,
 		timeout,
+		tokenRefresher,
 		mlPipelineAPIServerBasePath,
 		mlPipelineAPIServerName,
 		mlPipelineServiceHttpPort,
@@ -146,4 +156,7 @@ func init() {
 	flag.Float64Var(&clientQPS, clientQPSFlagName, 5, "The maximum QPS to the master from this client.")
 	flag.IntVar(&clientBurst, clientBurstFlagName, 10, "Maximum burst for throttle from this client.")
 	flag.StringVar(&executionType, executionTypeFlagName, "Workflow", "Custom Resource's name of the backend Orchestration Engine")
+	// TODO use viper/config file instead. Sync `saTokenRefreshIntervalFlagName` with the value from manifest file by using ENV var.
+	flag.Int64Var(&saTokenRefreshIntervalInSecs, saTokenRefreshIntervalFlagName, DefaultSATokenRefresherIntervalInSecs, "Persistence agent service account token read interval in seconds. "+
+		"Defines how often `/var/run/secrets/kubeflow/tokens/kubeflow-persistent_agent-api-token` is re-read.")
 }
diff --git a/backend/src/apiserver/auth/authenticator_token_review.go b/backend/src/apiserver/auth/authenticator_token_review.go
index d0f49e6b04..ddd114841d 100644
--- a/backend/src/apiserver/auth/authenticator_token_review.go
+++ b/backend/src/apiserver/auth/authenticator_token_review.go
@@ -92,7 +92,8 @@ func (tra *TokenReviewAuthenticator) doTokenReview(ctx context.Context, userIden
 	if !review.Status.Authenticated {
 		return nil, util.NewUnauthenticatedError(
 			errors.New("Failed to authenticate token review"),
-			"Review.Status.Authenticated is false",
+			"Review.Status.Authenticated is false. Error: %s",
+			review.Status.Error,
 		)
 	}
 	if !tra.ensureAudience(review.Status.Audiences) {
diff --git a/backend/src/apiserver/client_manager.go b/backend/src/apiserver/client_manager.go
index 1549636392..e1056697e5 100644
--- a/backend/src/apiserver/client_manager.go
+++ b/backend/src/apiserver/client_manager.go
@@ -279,6 +279,16 @@ func initDBClient(initConnectionTimeout time.Duration) *storage.DB {
 		glog.Fatalf("Failed to create index experimentuuid_conditions_finishedatinsec on run_details. Error: %s", response.Error)
 	}
 
+	response = db.Model(&model.Run{}).AddIndex("namespace_createatinsec", "Namespace", "CreatedAtInSec")
+	if response.Error != nil {
+		glog.Fatalf("Failed to create index namespace_createatinsec on run_details. Error: %s", response.Error)
+	}
+
+	response = db.Model(&model.Run{}).AddIndex("namespace_conditions_finishedatinsec", "Namespace", "Conditions", "FinishedAtInSec")
+	if response.Error != nil {
+		glog.Fatalf("Failed to create index namespace_conditions_finishedatinsec on run_details. Error: %s", response.Error)
+	}
+
 	response = db.Model(&model.Pipeline{}).AddUniqueIndex("name_namespace_index", "Name", "Namespace")
 	if response.Error != nil {
 		glog.Fatalf("Failed to create index name_namespace_index on run_details. Error: %s", response.Error)
diff --git a/backend/src/apiserver/common/const.go b/backend/src/apiserver/common/const.go
index a50d89512b..85fd981419 100644
--- a/backend/src/apiserver/common/const.go
+++ b/backend/src/apiserver/common/const.go
@@ -19,12 +19,14 @@ const (
 	RbacPipelinesGroup   = "pipelines.kubeflow.org"
 	RbacPipelinesVersion = "v1beta1"
 
-	RbacResourceTypePipelines      = "pipelines"
-	RbacResourceTypeExperiments    = "experiments"
-	RbacResourceTypeRuns           = "runs"
-	RbacResourceTypeJobs           = "jobs"
-	RbacResourceTypeViewers        = "viewers"
-	RbacResourceTypeVisualizations = "visualizations"
+	RbacResourceTypePipelines          = "pipelines"
+	RbacResourceTypeExperiments        = "experiments"
+	RbacResourceTypeRuns               = "runs"
+	RbacResourceTypeJobs               = "jobs"
+	RbacResourceTypeViewers            = "viewers"
+	RbacResourceTypeVisualizations     = "visualizations"
+	RbacResourceTypeScheduledWorkflows = "scheduledworkflows"
+	RbacResourceTypeWorkflows          = "workflows"
 
 	RbacResourceVerbArchive = "archive"
 	RbacResourceVerbUpdate  = "update"
@@ -39,6 +41,7 @@ const (
 	RbacResourceVerbUnarchive     = "unarchive"
 	RbacResourceVerbReportMetrics = "reportMetrics"
 	RbacResourceVerbReadArtifact  = "readArtifact"
+	RbacResourceVerbReport        = "report"
 )
 
 const (
diff --git a/backend/src/apiserver/server/report_server.go b/backend/src/apiserver/server/report_server.go
index 1994120c80..7c63b2e463 100644
--- a/backend/src/apiserver/server/report_server.go
+++ b/backend/src/apiserver/server/report_server.go
@@ -17,6 +17,8 @@ package server
 import (
 	"context"
 	"encoding/json"
+	"github.com/kubeflow/pipelines/backend/src/apiserver/common"
+	authorizationv1 "k8s.io/api/authorization/v1"
 
 	"github.com/golang/protobuf/ptypes/empty"
 	apiv1beta1 "github.com/kubeflow/pipelines/backend/api/v1beta1/go_client"
@@ -49,6 +51,17 @@ func (s *ReportServer) reportWorkflow(ctx context.Context, workflow string) (*em
 	if err != nil {
 		return nil, util.Wrap(err, "Report workflow failed")
 	}
+
+	executionName := (*execSpec).ExecutionName()
+	resourceAttributes := &authorizationv1.ResourceAttributes{
+		Verb:     common.RbacResourceVerbReport,
+		Resource: common.RbacResourceTypeWorkflows,
+	}
+
+	if err := s.canAccessWorkflow(ctx, executionName, resourceAttributes); err != nil {
+		return nil, err
+	}
+
 	newExecSpec, err := s.resourceManager.ReportWorkflowResource(ctx, *execSpec)
 	if err != nil {
 		return nil, util.Wrap(err, "Failed to report workflow")
@@ -80,6 +93,15 @@ func (s *ReportServer) reportScheduledWorkflow(ctx context.Context, swf string)
 	if err != nil {
 		return nil, util.Wrap(err, "Report scheduled workflow failed")
 	}
+	resourceAttributes := &authorizationv1.ResourceAttributes{
+		Verb:     common.RbacResourceVerbReport,
+		Resource: common.RbacResourceTypeScheduledWorkflows,
+	}
+	err = s.canAccessWorkflow(ctx, string(scheduledWorkflow.UID), resourceAttributes)
+	if err != nil {
+		return nil, err
+	}
+
 	err = s.resourceManager.ReportScheduledWorkflowResource(scheduledWorkflow)
 	if err != nil {
 		return nil, err
@@ -136,6 +158,16 @@ func validateReportScheduledWorkflowRequest(swfManifest string) (*util.Scheduled
 	return swf, nil
 }
 
+func (s *ReportServer) canAccessWorkflow(ctx context.Context, executionName string, resourceAttributes *authorizationv1.ResourceAttributes) error {
+	resourceAttributes.Group = common.RbacPipelinesGroup
+	resourceAttributes.Version = common.RbacPipelinesVersion
+	err := s.resourceManager.IsAuthorized(ctx, resourceAttributes)
+	if err != nil {
+		return util.Wrapf(err, "Failed to report %s `%s` due to authorization error.", resourceAttributes.Resource, executionName)
+	}
+	return nil
+}
+
 func NewReportServer(resourceManager *resource.ResourceManager) *ReportServer {
 	return &ReportServer{resourceManager: resourceManager}
 }
diff --git a/backend/src/apiserver/storage/db.go b/backend/src/apiserver/storage/db.go
index d15d66c72d..f6ff3e7bf1 100644
--- a/backend/src/apiserver/storage/db.go
+++ b/backend/src/apiserver/storage/db.go
@@ -59,6 +59,9 @@ type SQLDialect interface {
 
 	// Inserts new rows and updates duplicates based on the key column.
 	Upsert(query string, key string, overwrite bool, columns ...string) string
+
+	// Updates a table using UPDATE with JOIN (mysql/production) or UPDATE FROM (sqlite/test).
+	UpdateWithJointOrFrom(targetTable, joinTable, setClause, joinClause, whereClause string) string
 }
 
 // MySQLDialect implements SQLDialect with mysql dialect implementation.
@@ -88,6 +91,10 @@ func (d MySQLDialect) IsDuplicateError(err error) bool {
 	return ok && sqlError.Number == mysqlerr.ER_DUP_ENTRY
 }
 
+func (d MySQLDialect) UpdateWithJointOrFrom(targetTable, joinTable, setClause, joinClause, whereClause string) string {
+	return fmt.Sprintf("UPDATE %s INNER JOIN %s ON %s SET %s WHERE %s", targetTable, joinTable, joinClause, setClause, whereClause)
+}
+
 // SQLiteDialect implements SQLDialect with sqlite dialect implementation.
 type SQLiteDialect struct{}
 
@@ -131,6 +138,10 @@ func (d SQLiteDialect) IsDuplicateError(err error) bool {
 	return ok && sqlError.Code == sqlite3.ErrConstraint
 }
 
+func (d SQLiteDialect) UpdateWithJointOrFrom(targetTable, joinTable, setClause, joinClause, whereClause string) string {
+	return fmt.Sprintf("UPDATE %s SET %s FROM %s WHERE %s AND %s", targetTable, setClause, joinTable, joinClause, whereClause)
+}
+
 func NewMySQLDialect() MySQLDialect {
 	return MySQLDialect{}
 }
diff --git a/backend/src/apiserver/storage/db_test.go b/backend/src/apiserver/storage/db_test.go
index 256ac4d263..c68510c2e7 100644
--- a/backend/src/apiserver/storage/db_test.go
+++ b/backend/src/apiserver/storage/db_test.go
@@ -103,11 +103,35 @@ func TestSQLiteDialect_Upsert(t *testing.T) {
 }
 
 func TestMySQLDialect_Upsert(t *testing.T) {
-	sqliteDialect := NewMySQLDialect()
-	actualQuery := sqliteDialect.Upsert(`insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow")`, "namespace", true, []string{"uuid", "name"}...)
+	mysqlDialect := NewMySQLDialect()
+	actualQuery := mysqlDialect.Upsert(`insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow")`, "namespace", true, []string{"uuid", "name"}...)
 	expectedQuery := `insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow") ON DUPLICATE KEY UPDATE uuid=VALUES(uuid),name=VALUES(name)`
 	assert.Equal(t, expectedQuery, actualQuery)
 
-	actualQuery2 := sqliteDialect.Upsert(`insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow")`, "namespace", false, []string{"uuid", "name"}...)
+	actualQuery2 := mysqlDialect.Upsert(`insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow")`, "namespace", false, []string{"uuid", "name"}...)
 	expectedQuery2 := `insert into table (uuid, name, namespace) values ("a", "item1", "kubeflow"),("b", "item1", "kubeflow") ON DUPLICATE KEY UPDATE uuid=uuid,name=name`
 	assert.Equal(t, expectedQuery2, actualQuery2)
 }
+
+func TestMySQLDialect_UpdateWithJointOrFrom(t *testing.T) {
+	mysqlDialect := NewMySQLDialect()
+	actualQuery := mysqlDialect.UpdateWithJointOrFrom(
+		"target_table",
+		"other_table",
+		"State = ?",
+		"target_table.Name = other_table.Name",
+		"target_table.status = ?")
+	expectedQuery := `UPDATE target_table INNER JOIN other_table ON target_table.Name = other_table.Name SET State = ? WHERE target_table.status = ?`
+	assert.Equal(t, expectedQuery, actualQuery)
+}
+
+func TestSQLiteDialect_UpdateWithJointOrFrom(t *testing.T) {
+	sqliteDialect := NewSQLiteDialect()
+	actualQuery := sqliteDialect.UpdateWithJointOrFrom(
+		"target_table",
+		"other_table",
+		"State = ?",
+		"target_table.Name = other_table.Name",
+		"target_table.status = ?")
+	expectedQuery := `UPDATE target_table SET State = ? FROM other_table WHERE target_table.Name = other_table.Name AND target_table.status = ?`
+	assert.Equal(t, expectedQuery, actualQuery)
+}
diff --git a/backend/src/apiserver/storage/experiment_store.go b/backend/src/apiserver/storage/experiment_store.go
index d254537a28..febfa8b2d0 100644
--- a/backend/src/apiserver/storage/experiment_store.go
+++ b/backend/src/apiserver/storage/experiment_store.go
@@ -309,31 +309,15 @@ func (s *ExperimentStore) ArchiveExperiment(expId string) error {
 			"Failed to create query to archive experiment %s. error: '%v'", expId, err.Error())
 	}
 
-	// TODO(gkcalat): deprecate resource_references table once we migration to v2beta1 is available.
-	// TODO(jingzhang36): use inner join to replace nested query for better performance.
-	filteredRunsSql, filteredRunsArgs, err := sq.Select("ResourceUUID").
-		From("resource_references as rf").
-		Where(sq.And{
-			sq.Eq{"rf.ResourceType": model.RunResourceType},
-			sq.Eq{"rf.ReferenceUUID": expId},
-			sq.Eq{"rf.ReferenceType": model.ExperimentResourceType},
-		}).ToSql()
-	if err != nil {
-		return util.NewInternalServerError(err,
-			"Failed to create query to filter the runs in an experiment %s. error: '%v'", expId, err.Error())
-	}
-	updateRunsSql, updateRunsArgs, err := sq.
-		Update("run_details").
-		SetMap(sq.Eq{
-			"StorageState": model.StorageStateArchived.ToString(),
-		}).
-		Where(sq.NotEq{"StorageState": model.StorageStateArchived.ToString()}).
-		Where(fmt.Sprintf("UUID in (%s) OR ExperimentUUID = '%s'", filteredRunsSql, expId), filteredRunsArgs...).
-		ToSql()
-	if err != nil {
-		return util.NewInternalServerError(err,
-			"Failed to create query to archive the runs in an experiment %s. error: '%v'", expId, err.Error())
-	}
+	var updateRunsArgs []interface{}
+	updateRunsArgs = append(updateRunsArgs, model.StorageStateArchived.ToString(), model.RunResourceType, expId, model.ExperimentResourceType)
+	// TODO(gkcalat): deprecate resource_references table once we migrate to v2beta1 and switch to filtering on Run's `experiment_id` instead.
+	updateRunsSQL := s.db.UpdateWithJointOrFrom(
+		"run_details",
+		"resource_references",
+		"StorageState = ?",
+		"run_details.UUID = resource_references.ResourceUUID",
+		"resource_references.ResourceType = ? AND resource_references.ReferenceUUID = ? AND resource_references.ReferenceType = ?")
 
 	updateRunsWithExperimentUUIDSql, updateRunsWithExperimentUUIDArgs, err := sq.
 		Update("run_details").
@@ -348,32 +332,16 @@ func (s *ExperimentStore) ArchiveExperiment(expId string) error {
 			"Failed to create query to archive the runs in an experiment %s. error: '%v'", expId, err.Error())
 	}
 
-	// TODO(jingzhang36): use inner join to replace nested query for better performance.
-	filteredJobsSql, filteredJobsArgs, err := sq.Select("ResourceUUID").
-		From("resource_references as rf").
-		Where(sq.And{
-			sq.Eq{"rf.ResourceType": model.JobResourceType},
-			sq.Eq{"rf.ReferenceUUID": expId},
-			sq.Eq{"rf.ReferenceType": model.ExperimentResourceType},
-		}).ToSql()
-	if err != nil {
-		return util.NewInternalServerError(err,
-			"Failed to create query to filter the jobs in an experiment %s. error: '%v'", expId, err.Error())
-	}
+	var updateJobsArgs []interface{}
 	now := s.time.Now().Unix()
-	updateJobsSql, updateJobsArgs, err := sq.
-		Update("jobs").
-		SetMap(sq.Eq{
-			"Enabled":        false,
-			"UpdatedAtInSec": now,
-		}).
-		Where(sq.Eq{"Enabled": true}).
-		Where(fmt.Sprintf("UUID in (%s) OR ExperimentUUID = '%s'", filteredJobsSql, expId), filteredJobsArgs...).
-		ToSql()
-	if err != nil {
-		return util.NewInternalServerError(err,
-			"Failed to create query to archive the jobs in an experiment %s. error: '%v'", expId, err.Error())
-	}
+	updateJobsArgs = append(updateJobsArgs, false, now, model.JobResourceType, expId, model.ExperimentResourceType)
+	// TODO(gkcalat): deprecate resource_references table once we migrate to v2beta1 and switch to filtering on Job's `experiment_id` instead.
+	updateJobsSQL := s.db.UpdateWithJointOrFrom(
+		"jobs",
+		"resource_references",
+		"Enabled = ?, UpdatedAtInSec = ?",
+		"jobs.UUID = resource_references.ResourceUUID",
+		"resource_references.ResourceType = ? AND resource_references.ReferenceUUID = ? AND resource_references.ReferenceType = ?")
 
 	// In a single transaction, we update experiments, run_details and jobs tables.
 	tx, err := s.db.Begin()
@@ -388,7 +356,7 @@ func (s *ExperimentStore) ArchiveExperiment(expId string) error {
 			"Failed to archive experiment %s. error: '%v'", expId, err.Error())
 	}
 
-	_, err = tx.Exec(updateRunsSql, updateRunsArgs...)
+	_, err = tx.Exec(updateRunsSQL, updateRunsArgs...)
 	if err != nil {
 		tx.Rollback()
 		return util.NewInternalServerError(err,
@@ -402,7 +370,7 @@ func (s *ExperimentStore) ArchiveExperiment(expId string) error {
 			"Failed to archive runs with ExperimentUUID being %s. error: '%v'", expId, err.Error())
 	}
 
-	_, err = tx.Exec(updateJobsSQL, updateJobsArgs...)
+	_, err = tx.Exec(updateJobsSQL, updateJobsArgs...)
 	if err != nil {
 		tx.Rollback()
 		return util.NewInternalServerError(err,
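A subtlety worth calling out in the rewritten `ArchiveExperiment`: it binds a single args slice for either backend even though the two dialects render the clauses in different orders. That works because in the MySQL form the `ON` join clause carries no placeholders and precedes `SET`, while in the SQLite form `SET` comes first — so in both renderings the `SET` placeholders bind before the `WHERE` placeholders. A standalone sketch (the helper functions are local stand-ins mirroring the two dialect methods added in `db.go`):

```go
package main

import "fmt"

// Mirrors MySQLDialect.UpdateWithJointOrFrom.
func mysqlUpdate(target, join, set, on, where string) string {
	return fmt.Sprintf("UPDATE %s INNER JOIN %s ON %s SET %s WHERE %s", target, join, on, set, where)
}

// Mirrors SQLiteDialect.UpdateWithJointOrFrom.
func sqliteUpdate(target, join, set, on, where string) string {
	return fmt.Sprintf("UPDATE %s SET %s FROM %s WHERE %s AND %s", target, set, join, on, where)
}

func main() {
	set := "StorageState = ?"
	on := "run_details.UUID = resource_references.ResourceUUID"
	where := "resource_references.ResourceType = ? AND resource_references.ReferenceUUID = ? AND resource_references.ReferenceType = ?"

	// Both dialects place the SET placeholder before the WHERE placeholders,
	// which is why ArchiveExperiment can bind one args slice
	// (state, resourceType, experimentID, referenceType) for either backend.
	fmt.Println(mysqlUpdate("run_details", "resource_references", set, on, where))
	fmt.Println(sqliteUpdate("run_details", "resource_references", set, on, where))
}
```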
diff --git a/frontend/server/app.ts b/frontend/server/app.ts
index 9498d98283..f6ae1988bf 100644
--- a/frontend/server/app.ts
+++ b/frontend/server/app.ts
@@ -131,6 +131,7 @@ function createUIServer(options: UIConfigs) {
     '/artifacts/*',
     getArtifactsProxyHandler({
       enabled: options.artifacts.proxy.enabled,
+      allowedDomain: options.artifacts.allowedDomain,
       namespacedServiceGetter: getArtifactServiceGetter(options.artifacts.proxy),
     }),
   );
diff --git a/frontend/server/configs.ts b/frontend/server/configs.ts
index 09b2884acd..f6ef044d12 100644
--- a/frontend/server/configs.ts
+++ b/frontend/server/configs.ts
@@ -64,6 +64,8 @@ export function loadConfigs(argv: string[], env: ProcessEnv): UIConfigs {
     AWS_S3_ENDPOINT,
     /** http/https base URL */
     HTTP_BASE_URL = '',
+    /** By default, all domains are allowed. Set this regex to restrict artifact fetching to matching domains. */
+    ALLOWED_ARTIFACT_DOMAIN_REGEX = '^.*$',
     /** http/https fetch with this authorization header key (for example: 'Authorization') */
     HTTP_AUTHORIZATION_KEY = '',
     /** http/https fetch with this authorization header value by default when absent in client request at above key */
@@ -155,6 +157,7 @@ export function loadConfigs(argv: string[], env: ProcessEnv): UIConfigs {
       },
       proxy: loadArtifactsProxyConfig(env),
       streamLogsFromServerApi: asBool(STREAM_LOGS_FROM_SERVER_API),
+      allowedDomain: ALLOWED_ARTIFACT_DOMAIN_REGEX,
     },
     metadata: {
       envoyService: {
@@ -274,6 +277,7 @@ export interface UIConfigs {
     http: HttpConfigs;
     proxy: ArtifactsProxyConfig;
     streamLogsFromServerApi: boolean;
+    allowedDomain: string;
   };
   pod: {
     logContainerName: string;
diff --git a/frontend/server/handlers/artifacts.ts b/frontend/server/handlers/artifacts.ts
index 7af04b4aac..cbc8a5da84 100644
--- a/frontend/server/handlers/artifacts.ts
+++ b/frontend/server/handlers/artifacts.ts
@@ -23,6 +23,7 @@ import proxy from 'http-proxy-middleware';
 import { HACK_FIX_HPM_PARTIAL_RESPONSE_HEADERS } from '../consts';
 import * as fs from 'fs';
+import { isAllowedDomain } from './domain-checker';
 
 /**
  * ArtifactsQueryStrings describes the expected query strings key value pairs
@@ -56,11 +57,12 @@ export function getArtifactsHandler({
     aws: AWSConfigs;
     http: HttpConfigs;
     minio: MinioConfigs;
+    allowedDomain: string;
   };
   tryExtract: boolean;
   useParameter: boolean;
 }): Handler {
-  const { aws, http, minio } = artifactsConfigs;
+  const { aws, http, minio, allowedDomain } = artifactsConfigs;
   return async (req, res) => {
     const source = useParameter ? req.params.source : req.query.source;
     const bucket = useParameter ? req.params.bucket : req.query.bucket;
@@ -109,6 +111,7 @@ export function getArtifactsHandler({
       case 'http':
       case 'https':
         getHttpArtifactsHandler(
+          allowedDomain,
           getHttpUrl(source, http.baseUrl || '', bucket, key),
           http.auth,
           peek,
@@ -126,7 +129,7 @@ export function getArtifactsHandler({
         break;
 
       default:
-        res.status(500).send('Unknown storage source: ' + source);
+        res.status(500).send('Unknown storage source');
         return;
     }
   };
@@ -146,6 +149,7 @@ function getHttpUrl(source: 'http' | 'https', baseUrl: string, bucket: string, k
 }
 
 function getHttpArtifactsHandler(
+  allowedDomain: string,
   url: string,
   auth: {
     key: string;
@@ -162,9 +166,13 @@ function getHttpArtifactsHandler(
       headers[auth.key] =
         req.headers[auth.key] || req.headers[auth.key.toLowerCase()] || auth.defaultValue;
     }
+    if (!isAllowedDomain(url, allowedDomain)) {
+      res.status(500).send(`Domain not allowed.`);
+      return;
+    }
     const response = await fetch(url, { headers });
     response.body
-      .on('error', err => res.status(500).send(`Unable to retrieve artifact at ${url}: ${err}`))
+      .on('error', err => res.status(500).send(`Unable to retrieve artifact: ${err}`))
      .pipe(new PreviewStream({ peek }))
      .pipe(res);
   };
@@ -178,20 +186,12 @@ function getMinioArtifactHandler(
     try {
       const stream = await getObjectStream(options);
       stream
-        .on('error', err =>
-          res
-            .status(500)
-            .send(
-              `Failed to get object in bucket ${options.bucket} at path ${options.key}: ${err}`,
-            ),
-        )
+        .on('error', err => res.status(500).send(`Failed to get object in bucket: ${err}`))
         .pipe(new PreviewStream({ peek }))
         .pipe(res);
     } catch (err) {
       console.error(err);
-      res
-        .status(500)
-        .send(`Failed to get object in bucket ${options.bucket} at path ${options.key}: ${err}`);
+      res.status(500).send(`Failed to get object in bucket: ${err}`);
     }
   };
 }
@@ -288,7 +288,8 @@ function getVolumeArtifactsHandler(options: { bucket: string; key: string }, pee
       filePathInVolume: key,
     });
     if (parseError) {
-      res.status(404).send(`Failed to open volume://${bucket}/${key}, ${parseError}`);
+      console.log(`Failed to open volume: ${parseError}`);
+      res.status(404).send(`Failed to open volume.`);
       return;
     }
 
@@ -297,9 +298,7 @@ function getVolumeArtifactsHandler(options: { bucket: string; key: string }, pee
     if (stat.isDirectory()) {
       res
         .status(400)
-        .send(
-          `Failed to open volume://${bucket}/${key}, file ${filePath} is directory, does not support now`,
-        );
+        .send(`Failed to open volume: ${filePath} is a directory, which is not supported`);
       return;
     }
 
@@ -307,7 +306,8 @@ function getVolumeArtifactsHandler(options: { bucket: string; key: string }, pee
       .pipe(new PreviewStream({ peek }))
       .pipe(res);
   } catch (err) {
-    res.status(500).send(`Failed to open volume://${bucket}/${key}: ${err}`);
+    console.log(`Failed to open volume: ${err}`);
+    res.status(500).send(`Failed to open volume.`);
   }
 };
@@ -341,9 +341,11 @@ const QUERIES = {
 
 export function getArtifactsProxyHandler({
   enabled,
+  allowedDomain,
   namespacedServiceGetter,
 }: {
   enabled: boolean;
+  allowedDomain: string;
   namespacedServiceGetter: NamespacedServiceGetter;
 }): Handler {
   if (!enabled) {
@@ -367,9 +369,15 @@ export function getArtifactsProxyHandler({
       router: req => {
         const namespace = getNamespaceFromUrl(req.url || '');
         if (!namespace) {
-          throw new Error(`namespace query param expected in ${req.url}.`);
+          console.log(`namespace query param expected in ${req.url}.`);
+          throw new Error(`namespace query param expected.`);
+        }
+        const urlStr = namespacedServiceGetter(namespace!);
+        if (!isAllowedDomain(urlStr, allowedDomain)) {
+          console.log(`Domain is not allowed.`);
+          throw new Error(`Domain is not allowed.`);
         }
-        return namespacedServiceGetter(namespace);
+        return namespacedServiceGetter(namespace!);
       },
       target: '/artifacts',
       headers: HACK_FIX_HPM_PARTIAL_RESPONSE_HEADERS,
diff --git a/frontend/server/handlers/domain-checker.ts b/frontend/server/handlers/domain-checker.ts
new file mode 100644
index 0000000000..88ad491759
--- /dev/null
+++ b/frontend/server/handlers/domain-checker.ts
@@ -0,0 +1,33 @@
+// Copyright 2023 The Kubeflow Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+export function isAllowedDomain(urlStr: string, allowedDomain: string): boolean {
+  const allowedRegExp = new RegExp(allowedDomain);
+  const domain = domain_from_url(urlStr);
+  const allowed = allowedRegExp.test(domain);
+  if (!allowed) {
+    console.log(`Domain not allowed: ${urlStr}`);
+  }
+  return allowed;
+}
+
+function domain_from_url(url: string): string {
+  let result: string = '';
+  let match = url.match(/^(?:https?:\/\/)?(?:[^@\/\n]+@)?([^:\/?\n]+)/);
+  if (match) {
+    // Use the capture group so the scheme and userinfo are excluded.
+    result = match[1];
+  }
+  return result;
+}
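The host extraction above relies on the regex capture group — group 1 is the bare host, with scheme and userinfo stripped — which is then tested against `ALLOWED_ARTIFACT_DOMAIN_REGEX`. A Go rendition of the same check for reference (the original is TypeScript; function names and sample URLs here are illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as domain-checker.ts: optional scheme, optional userinfo,
// then capture the host up to a port, path, or query.
var hostPattern = regexp.MustCompile(`^(?:https?://)?(?:[^@/\n]+@)?([^:/?\n]+)`)

// isAllowedDomain extracts the host via capture group 1 and tests it
// against the configured allow-list regex.
func isAllowedDomain(urlStr, allowedDomain string) bool {
	m := hostPattern.FindStringSubmatch(urlStr)
	if m == nil {
		return false
	}
	return regexp.MustCompile(allowedDomain).MatchString(m[1])
}

func main() {
	fmt.Println(isAllowedDomain("https://minio-service.kubeflow:9000/x", `^.*$`))           // true: default allows all
	fmt.Println(isAllowedDomain("https://evil.example.com/x", `^minio-service\.kubeflow$`)) // false
}
```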
diff --git a/frontend/server/integration-tests/artifact-get.test.ts b/frontend/server/integration-tests/artifact-get.test.ts
index 868678233d..52cce57d7c 100644
--- a/frontend/server/integration-tests/artifact-get.test.ts
+++ b/frontend/server/integration-tests/artifact-get.test.ts
@@ -438,11 +438,7 @@ describe('/artifacts', () => {
     const request = requests(app.start());
     request
       .get(`/artifacts/get?source=volume&bucket=notexist&key=content`)
-      .expect(
-        404,
-        'Failed to open volume://notexist/content, Cannot find file "volume://notexist/content" in pod "ml-pipeline-ui": volume "notexist" not configured',
-        done,
-      );
+      .expect(404, 'Failed to open volume.', done);
   });
 
   it('responds error with a not exist volume mount path if source=volume', done => {
@@ -485,11 +481,7 @@ describe('/artifacts', () => {
     const request = requests(app.start());
     request
       .get(`/artifacts/get?source=volume&bucket=artifact&key=notexist/config`)
-      .expect(
-        404,
-        'Failed to open volume://artifact/notexist/config, Cannot find file "volume://artifact/notexist/config" in pod "ml-pipeline-ui": volume "artifact" not mounted or volume "artifact" with subPath (which is prefix of notexist/config) not mounted',
-        done,
-      );
+      .expect(404, 'Failed to open volume.', done);
   });
 
   it('responds error with a not exist volume mount artifact if source=volume', done => {
@@ -529,11 +521,7 @@ describe('/artifacts', () => {
     const request = requests(app.start());
     request
       .get(`/artifacts/get?source=volume&bucket=artifact&key=subartifact/notxist.csv`)
-      .expect(
-        500,
-        "Failed to open volume://artifact/subartifact/notxist.csv: Error: ENOENT: no such file or directory, stat '/foo/bar/notxist.csv'",
-        done,
-      );
+      .expect(500, 'Failed to open volume.', done);
   });
 });
diff --git a/manifests/gcp_marketplace/chart/kubeflow-pipelines/templates/application.yaml b/manifests/gcp_marketplace/chart/kubeflow-pipelines/templates/application.yaml
index 42542f7292..fd778769e4 100644
--- a/manifests/gcp_marketplace/chart/kubeflow-pipelines/templates/application.yaml
+++ b/manifests/gcp_marketplace/chart/kubeflow-pipelines/templates/application.yaml
@@ -12,7 +12,7 @@ metadata:
 spec:
   descriptor:
     type: Kubeflow Pipelines
-    version: 2.0.0
+    version: 2.0.1
   description: |-
     Reusable end-to-end ML workflow
   maintainers:
diff --git a/manifests/gcp_marketplace/schema.yaml b/manifests/gcp_marketplace/schema.yaml
index ef985d265e..46cd2b385e 100644
--- a/manifests/gcp_marketplace/schema.yaml
+++ b/manifests/gcp_marketplace/schema.yaml
@@ -1,9 +1,9 @@
 x-google-marketplace:
   schemaVersion: v2
   applicationApiVersion: v1beta1
-  publishedVersion: 2.0.0
+  publishedVersion: 2.0.1
   publishedVersionMetadata:
-    releaseNote: Based on 2.0.0 version.
+    releaseNote: Based on 2.0.1 version.
   releaseTypes:
     - Feature
   recommended: false
diff --git a/manifests/kustomize/base/cache-deployer/kustomization.yaml b/manifests/kustomize/base/cache-deployer/kustomization.yaml
index 3640b1657c..a9640aa6cb 100644
--- a/manifests/kustomize/base/cache-deployer/kustomization.yaml
+++ b/manifests/kustomize/base/cache-deployer/kustomization.yaml
@@ -8,4 +8,4 @@ commonLabels:
   app: cache-deployer
 images:
 - name: gcr.io/ml-pipeline/cache-deployer
-  newTag: 2.0.0
+  newTag: 2.0.1
diff --git a/manifests/kustomize/base/cache/kustomization.yaml b/manifests/kustomize/base/cache/kustomization.yaml
index a2c39814f5..56c40afbe4 100644
--- a/manifests/kustomize/base/cache/kustomization.yaml
+++ b/manifests/kustomize/base/cache/kustomization.yaml
@@ -10,4 +10,4 @@ commonLabels:
   app: cache-server
 images:
 - name: gcr.io/ml-pipeline/cache-server
-  newTag: 2.0.0
+  newTag: 2.0.1
diff --git a/manifests/kustomize/base/installs/generic/pipeline-install-config.yaml b/manifests/kustomize/base/installs/generic/pipeline-install-config.yaml
index 0a2a11d0c9..47ce6b687d 100644
--- a/manifests/kustomize/base/installs/generic/pipeline-install-config.yaml
+++ b/manifests/kustomize/base/installs/generic/pipeline-install-config.yaml
@@ -11,7 +11,7 @@ data:
     until the changes take effect. A quick way to restart all deployments in a
     namespace: `kubectl rollout restart deployment -n <namespace>`.
   appName: pipeline
-  appVersion: 2.0.0
+  appVersion: 2.0.1
   dbHost: mysql
   dbPort: "3306"
   mlmdDb: metadb
diff --git a/manifests/kustomize/base/installs/multi-user/persistence-agent/cluster-role.yaml b/manifests/kustomize/base/installs/multi-user/persistence-agent/cluster-role.yaml
index 8334211471..2df34121ba 100644
--- a/manifests/kustomize/base/installs/multi-user/persistence-agent/cluster-role.yaml
+++ b/manifests/kustomize/base/installs/multi-user/persistence-agent/cluster-role.yaml
@@ -19,6 +19,13 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - pipelines.kubeflow.org
+  resources:
+  - scheduledworkflows
+  - workflows
+  verbs:
+  - report
 - apiGroups:
   - ''
   resources:
diff --git a/manifests/kustomize/base/metadata/base/kustomization.yaml b/manifests/kustomize/base/metadata/base/kustomization.yaml
index 290394fac4..400315b107 100644
--- a/manifests/kustomize/base/metadata/base/kustomization.yaml
+++ b/manifests/kustomize/base/metadata/base/kustomization.yaml
@@ -10,4 +10,4 @@ resources:
 namespace: kubeflow
 images:
 - name: gcr.io/ml-pipeline/metadata-envoy
-  newTag: 2.0.0
+  newTag: 2.0.1
diff --git a/manifests/kustomize/base/pipeline/kustomization.yaml b/manifests/kustomize/base/pipeline/kustomization.yaml
index 4fd9e2cc5d..d04099e521 100644
--- a/manifests/kustomize/base/pipeline/kustomization.yaml
+++ b/manifests/kustomize/base/pipeline/kustomization.yaml
@@ -37,14 +37,14 @@ resources:
 - kfp-launcher-configmap.yaml
 images:
 - name: gcr.io/ml-pipeline/api-server
-  newTag: 2.0.0
+  newTag: 2.0.1
 - name: gcr.io/ml-pipeline/persistenceagent
-  newTag: 2.0.0
+  newTag: 2.0.1
 - name: gcr.io/ml-pipeline/scheduledworkflow
-  newTag: 2.0.0
+  newTag: 2.0.1
 - name: gcr.io/ml-pipeline/frontend
-  newTag: 2.0.0
+  newTag: 2.0.1
 - name: gcr.io/ml-pipeline/viewer-crd-controller
-  newTag: 2.0.0
+  newTag: 2.0.1
 - name: gcr.io/ml-pipeline/visualization-server
-  newTag: 2.0.0
+  newTag: 2.0.1
diff --git a/manifests/kustomize/base/pipeline/metadata-writer/kustomization.yaml b/manifests/kustomize/base/pipeline/metadata-writer/kustomization.yaml
index c78915c688..2d118e33cf 100644
--- a/manifests/kustomize/base/pipeline/metadata-writer/kustomization.yaml
+++ b/manifests/kustomize/base/pipeline/metadata-writer/kustomization.yaml
@@ -7,4 +7,4 @@ resources:
 - metadata-writer-sa.yaml
 images:
 - name: gcr.io/ml-pipeline/metadata-writer
-  newTag: 2.0.0
+  newTag: 2.0.1
diff --git a/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-deployment.yaml b/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-deployment.yaml
index 2137a5c30c..7e979be070 100644
--- a/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-deployment.yaml
+++ b/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-deployment.yaml
@@ -38,4 +38,15 @@ spec:
           requests:
             cpu: 120m
             memory: 500Mi
+        volumeMounts:
+          - mountPath: /var/run/secrets/kubeflow/tokens
+            name: persistenceagent-sa-token
       serviceAccountName: ml-pipeline-persistenceagent
+      volumes:
+        - name: persistenceagent-sa-token
+          projected:
+            sources:
+              - serviceAccountToken:
+                  path: persistenceagent-sa-token
+                  expirationSeconds: 3600
+                  audience: pipelines.kubeflow.org
diff --git a/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-role.yaml b/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-role.yaml
index 4790f17b24..e95df98b88 100644
--- a/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-role.yaml
+++ b/manifests/kustomize/base/pipeline/ml-pipeline-persistenceagent-role.yaml
@@ -19,6 +19,13 @@ rules:
   - get
   - list
  - watch
+- apiGroups:
+  - pipelines.kubeflow.org
+  resources:
+  - scheduledworkflows
+  - workflows
+  verbs:
+  - report
 - apiGroups:
   - ''
   resources:
diff --git a/manifests/kustomize/env/gcp/inverse-proxy/kustomization.yaml b/manifests/kustomize/env/gcp/inverse-proxy/kustomization.yaml
index cd80c7ce0a..e8798ef205 100644
--- a/manifests/kustomize/env/gcp/inverse-proxy/kustomization.yaml
+++ b/manifests/kustomize/env/gcp/inverse-proxy/kustomization.yaml
@@ -2,7 +2,7 @@ apiVersion: kustomize.config.k8s.io/v1beta1
 kind: Kustomization
 images:
 - name: gcr.io/ml-pipeline/inverse-proxy-agent
-  newTag: 2.0.0
+  newTag: 2.0.1
 resources:
 - proxy-configmap.yaml
 - proxy-deployment.yaml