diff --git a/api/v1/clusterextension_types.go b/api/v1/clusterextension_types.go index 2846b24c6..d150d4997 100644 --- a/api/v1/clusterextension_types.go +++ b/api/v1/clusterextension_types.go @@ -107,6 +107,16 @@ type ClusterExtensionSpec struct { // // +optional Config *ClusterExtensionConfig `json:"config,omitempty"` + + // progressDeadlineMinutes is an optional field that defines the maximum period + // of time in minutes after which an installation should be considered failed and + // require manual intervention. This functionality is disabled when no value + // is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + // + // +kubebuilder:validation:Minimum:=10 + // +kubebuilder:validation:Maximum:=720 + // +optional + ProgressDeadlineMinutes int32 `json:"progressDeadlineMinutes,omitempty"` } const SourceTypeCatalog = "Catalog" diff --git a/api/v1/clusterextensionrevision_types.go b/api/v1/clusterextensionrevision_types.go index f7e7ff064..0d733be61 100644 --- a/api/v1/clusterextensionrevision_types.go +++ b/api/v1/clusterextensionrevision_types.go @@ -87,6 +87,16 @@ type ClusterExtensionRevisionSpec struct { // +listMapKey=name // +optional Phases []ClusterExtensionRevisionPhase `json:"phases,omitempty"` + + // progressDeadlineMinutes is an optional field that defines the maximum period + // of time in minutes after which an installation should be considered failed and + // require manual intervention. This functionality is disabled when no value + // is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + // + // +kubebuilder:validation:Minimum:=10 + // +kubebuilder:validation:Maximum:=720 + // +optional + ProgressDeadlineMinutes int32 `json:"progressDeadlineMinutes,omitempty"` } // ClusterExtensionRevisionLifecycleState specifies the lifecycle state of the ClusterExtensionRevision.
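For context, the new field is set by users on the ClusterExtension and copied onto generated ClusterExtensionRevisions (see the applier changes further down in this patch). A minimal sketch of a ClusterExtension that opts into the deadline; the object names and the 30-minute value are illustrative, not taken from this change:

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterExtension
metadata:
  name: example-extension          # illustrative name
spec:
  namespace: example-namespace     # illustrative install namespace
  serviceAccount:
    name: example-installer        # illustrative ServiceAccount
  source:
    sourceType: Catalog
    catalog:
      packageName: example-package
  # Optional: consider the install failed if it has not rolled out within 30 minutes.
  # Valid values are 10 to 720 (minutes); omitting the field disables the deadline.
  progressDeadlineMinutes: 30
```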
diff --git a/api/v1/common_types.go b/api/v1/common_types.go index 115836b10..57e030f0b 100644 --- a/api/v1/common_types.go +++ b/api/v1/common_types.go @@ -32,6 +32,7 @@ const ( ReasonDeprecated = "Deprecated" // Common reasons - ReasonSucceeded = "Succeeded" - ReasonFailed = "Failed" + ReasonSucceeded = "Succeeded" + ReasonFailed = "Failed" + ReasonProgressDeadlineExceeded = "ProgressDeadlineExceeded" ) diff --git a/api/v1/validation_test.go b/api/v1/validation_test.go new file mode 100644 index 000000000..bbd755c3c --- /dev/null +++ b/api/v1/validation_test.go @@ -0,0 +1,176 @@ +package v1 + +import ( + "fmt" + "testing" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +func TestValidate(t *testing.T) { + type args struct { + object any + skipDefaulting bool + } + type want struct { + valid bool + } + type testCase struct { + args args + want want + } + defaultExtensionSpec := func(s *ClusterExtensionSpec) *ClusterExtensionSpec { + s.Namespace = "ns" + s.ServiceAccount = ServiceAccountReference{ + Name: "sa", + } + s.Source = SourceConfig{ + SourceType: SourceTypeCatalog, + Catalog: &CatalogFilter{ + PackageName: "test", + }, + } + return s + } + defaultRevisionSpec := func(s *ClusterExtensionRevisionSpec) *ClusterExtensionRevisionSpec { + s.Revision = 1 + return s + } + c := newClient(t) + i := 0 + + for name, tc := range map[string]testCase{ + "ClusterExtension: invalid progress deadline < 10": { + args: args{ + object: ClusterExtensionSpec{ + ProgressDeadlineMinutes: 9, + }, + }, + want: want{valid: false}, + }, + "ClusterExtension: valid progress deadline = 10": { + args: args{ + object: ClusterExtensionSpec{ + ProgressDeadlineMinutes: 10, + }, + }, + want: want{valid: true}, + }, + "ClusterExtension: valid progress deadline = 360": { + args: args{ + object: ClusterExtensionSpec{ + ProgressDeadlineMinutes: 360, + }, + }, + want: want{valid: true}, + }, + "ClusterExtension: valid progress deadline = 720": { + args: args{ + object: ClusterExtensionSpec{ + ProgressDeadlineMinutes: 720, + }, + }, + want: want{valid: true}, + }, + "ClusterExtension: invalid progress deadline > 720": { + args: args{ + object: ClusterExtensionSpec{ + ProgressDeadlineMinutes: 721, + }, + }, + want: want{valid: false}, + }, + "ClusterExtension: no progress deadline set": { + args: args{ + object: ClusterExtensionSpec{}, + }, + want: want{valid: true}, + }, + "ClusterExtensionRevision: invalid progress deadline < 10": { + args: args{ + object: ClusterExtensionRevisionSpec{ + ProgressDeadlineMinutes: 9, + }, + }, + want: want{valid: false}, + }, + "ClusterExtensionRevision: valid progress deadline = 10": { + args: args{ + object: ClusterExtensionRevisionSpec{ + ProgressDeadlineMinutes: 10, + }, + }, + want: want{valid: true}, + }, + "ClusterExtensionRevision: valid progress deadline = 360": { + args: args{ + object: ClusterExtensionRevisionSpec{ + ProgressDeadlineMinutes: 360, + }, + }, + want: want{valid: true}, + }, + "ClusterExtensionRevision: valid progress deadline = 720": { + args: args{ + object: ClusterExtensionRevisionSpec{ + ProgressDeadlineMinutes: 720, + }, + }, + want: want{valid: true}, + }, + "ClusterExtensionRevision: invalid progress deadline > 720": { + args: args{ + object: ClusterExtensionRevisionSpec{ + ProgressDeadlineMinutes: 721, + }, + }, + want: want{valid: false}, + }, + "ClusterExtensionRevision: no progress deadline set": { + args: args{ + object: ClusterExtensionRevisionSpec{}, + }, + want: 
want{valid: true}, + }, + } { + t.Run(name, func(t *testing.T) { + var obj client.Object + switch s := tc.args.object.(type) { + case ClusterExtensionSpec: + ce := &ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("ce-%d", i), + }, + Spec: s, + } + if !tc.args.skipDefaulting { + defaultExtensionSpec(&ce.Spec) + } + obj = ce + case ClusterExtensionRevisionSpec: + cer := &ClusterExtensionRevision{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("cer-%d", i), + }, + Spec: s, + } + if !tc.args.skipDefaulting { + defaultRevisionSpec(&cer.Spec) + } + obj = cer + default: + t.Fatalf("unknown type %T", s) + } + i++ + err := c.Create(t.Context(), obj) + if tc.want.valid && err != nil { + t.Fatal("expected create to succeed, but got:", err) + } + if !tc.want.valid && !errors.IsInvalid(err) { + t.Fatal("expected create to fail due to invalid payload, but got:", err) + } + }) + } +} diff --git a/cmd/operator-controller/main.go b/cmd/operator-controller/main.go index 2d78edda4..f02818f1d 100644 --- a/cmd/operator-controller/main.go +++ b/cmd/operator-controller/main.go @@ -598,7 +598,12 @@ func (c *boxcutterReconcilerConfigurator) Configure(ceReconciler *controllers.Cl return err } - // TODO: add support for preflight checks + // determine if PreAuthorizer should be enabled based on feature gate + var preAuth authorization.PreAuthorizer + if features.OperatorControllerFeatureGate.Enabled(features.PreflightPermissions) { + preAuth = authorization.NewRBACPreAuthorizer(c.mgr.GetClient()) + } + // TODO: better scheme handling - which types do we want to support? _ = apiextensionsv1.AddToScheme(c.mgr.GetScheme()) rg := &applier.SimpleRevisionGenerator{ @@ -610,6 +615,7 @@ func (c *boxcutterReconcilerConfigurator) Configure(ceReconciler *controllers.Cl Scheme: c.mgr.GetScheme(), RevisionGenerator: rg, Preflights: c.preflights, + PreAuthorizer: preAuth, FieldOwner: fmt.Sprintf("%s/clusterextension-controller", fieldOwnerPrefix), } revisionStatesGetter := &controllers.BoxcutterRevisionStatesGetter{Reader: c.mgr.GetClient()} diff --git a/commitchecker.yaml b/commitchecker.yaml index bd0c1d5bf..44a9c8c13 100644 --- a/commitchecker.yaml +++ b/commitchecker.yaml @@ -1,4 +1,4 @@ -expectedMergeBase: b08a054acc3892fb5b1977299c8a92cbddd90819 +expectedMergeBase: 8167ff8fd3ab3880459f9d1e5626c6e3073e3428 upstreamBranch: main upstreamOrg: operator-framework upstreamRepo: operator-controller diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md index 4feb0d632..c9cff6e48 100644 --- a/docs/api-reference/olmv1-api-reference.md +++ b/docs/api-reference/olmv1-api-reference.md @@ -344,6 +344,7 @@ _Appears in:_ | `source` _[SourceConfig](#sourceconfig)_ | source is required and selects the installation source of content for this ClusterExtension.
Set the sourceType field to perform the selection.
Catalog is currently the only implemented sourceType.
Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
| | `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension,
such as the pre-flight check configuration. | | | | `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration.
Configuration is bundle-specific and a bundle may provide a configuration schema.
When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version. | | | +| `progressDeadlineMinutes` _integer_ | progressDeadlineMinutes is an optional field that defines the maximum period
of time in minutes after which an installation should be considered failed and
require manual intervention. This functionality is disabled when no value
is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours).
| | Maximum: 720
Minimum: 10
| #### ClusterExtensionStatus diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml index 1e435dc70..31e267b30 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensionrevisions.yaml @@ -166,6 +166,16 @@ spec: x-kubernetes-validations: - message: phases is immutable rule: self == oldSelf || oldSelf.size() == 0 + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer revision: description: |- revision is a required, immutable sequence number representing a specific revision diff --git a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml index 7194392b6..66824fa12 100644 --- a/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml +++ b/helm/olmv1/base/operator-controller/crd/experimental/olm.operatorframework.io_clusterextensions.yaml @@ -165,6 +165,16 @@ spec: rule: self == oldSelf - message: namespace must be a valid DNS1123 label rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). 
+ format: int32 + maximum: 720 + minimum: 10 + type: integer serviceAccount: description: |- serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster diff --git a/internal/operator-controller/applier/boxcutter.go b/internal/operator-controller/applier/boxcutter.go index 63578e9cb..6030d1c77 100644 --- a/internal/operator-controller/applier/boxcutter.go +++ b/internal/operator-controller/applier/boxcutter.go @@ -1,10 +1,12 @@ package applier import ( + "bytes" "cmp" "context" "errors" "fmt" + "io" "io/fs" "maps" "slices" @@ -16,6 +18,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" + "k8s.io/cli-runtime/pkg/printers" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -25,6 +30,7 @@ import ( helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client" ocv1 "github.com/operator-framework/operator-controller/api/v1" + "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" "github.com/operator-framework/operator-controller/internal/shared/util/cache" ) @@ -191,7 +197,7 @@ func (r *SimpleRevisionGenerator) buildClusterExtensionRevision( annotations[labels.ServiceAccountNameKey] = ext.Spec.ServiceAccount.Name annotations[labels.ServiceAccountNamespaceKey] = ext.Spec.Namespace - return &ocv1.ClusterExtensionRevision{ + cer := &ocv1.ClusterExtensionRevision{ ObjectMeta: metav1.ObjectMeta{ Annotations: annotations, Labels: map[string]string{ @@ -206,6 +212,10 @@ func (r *SimpleRevisionGenerator) buildClusterExtensionRevision( Phases: PhaseSort(objects), }, } + if p := ext.Spec.ProgressDeadlineMinutes; p > 0 { + cer.Spec.ProgressDeadlineMinutes = p + } + return cer } // BoxcutterStorageMigrator migrates ClusterExtensions from Helm-based storage to @@ -279,28 +289,27 @@ type Boxcutter struct { Scheme *runtime.Scheme RevisionGenerator ClusterExtensionRevisionGenerator Preflights []Preflight + PreAuthorizer authorization.PreAuthorizer FieldOwner string } -func (bc *Boxcutter) getObjects(rev *ocv1.ClusterExtensionRevision) []client.Object { - var objs []client.Object - for _, phase := range rev.Spec.Phases { - for _, phaseObject := range phase.Objects { - objs = append(objs, &phaseObject.Object) - } - } - return objs -} - -func (bc *Boxcutter) createOrUpdate(ctx context.Context, obj client.Object) error { - if obj.GetObjectKind().GroupVersionKind().Empty() { - gvk, err := apiutil.GVKForObject(obj, bc.Scheme) +// createOrUpdate creates or updates the revision object. 
PreAuthorization checks are performed to ensure the +// user has sufficient permissions to manage the revision and its resources +func (bc *Boxcutter) createOrUpdate(ctx context.Context, user user.Info, rev *ocv1.ClusterExtensionRevision) error { + if rev.GetObjectKind().GroupVersionKind().Empty() { + gvk, err := apiutil.GVKForObject(rev, bc.Scheme) if err != nil { return err } - obj.GetObjectKind().SetGroupVersionKind(gvk) + rev.GetObjectKind().SetGroupVersionKind(gvk) } - return bc.Client.Patch(ctx, obj, client.Apply, client.FieldOwner(bc.FieldOwner), client.ForceOwnership) + + // Run auth preflight checks + if err := bc.runPreAuthorizationChecks(ctx, user, rev); err != nil { + return err + } + + return bc.Client.Patch(ctx, rev, client.Apply, client.FieldOwner(bc.FieldOwner), client.ForceOwnership) } func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotations map[string]string) error { @@ -329,7 +338,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust desiredRevision.Spec.Revision = currentRevision.Spec.Revision desiredRevision.Name = currentRevision.Name - err := bc.createOrUpdate(ctx, desiredRevision) + err := bc.createOrUpdate(ctx, getUserInfo(ext), desiredRevision) switch { case apierrors.IsInvalid(err): // We could not update the current revision due to trying to update an immutable field. @@ -344,7 +353,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust } // Preflights - plainObjs := bc.getObjects(desiredRevision) + plainObjs := getObjects(desiredRevision) for _, preflight := range bc.Preflights { if shouldSkipPreflight(ctx, preflight, ext, state) { continue @@ -379,7 +388,7 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust return fmt.Errorf("garbage collecting old revisions: %w", err) } - if err := bc.createOrUpdate(ctx, desiredRevision); err != nil { + if err := bc.createOrUpdate(ctx, getUserInfo(ext), desiredRevision); err != nil { return fmt.Errorf("creating new Revision: %w", err) } } @@ -387,6 +396,23 @@ func (bc *Boxcutter) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.Clust return nil } +// runPreAuthorizationChecks runs PreAuthorization checks if the PreAuthorizer is set. An error will be returned if +// the ClusterExtension service account does not have the necessary permissions to manage the revision's resources +func (bc *Boxcutter) runPreAuthorizationChecks(ctx context.Context, user user.Info, rev *ocv1.ClusterExtensionRevision) error { + if bc.PreAuthorizer == nil { + return nil + } + + // collect the revision manifests + manifestReader, err := revisionManifestReader(rev) + if err != nil { + return err + } + + // run preauthorization check + return formatPreAuthorizerOutput(bc.PreAuthorizer.PreAuthorize(ctx, user, manifestReader, revisionManagementPerms(rev))) +} + // garbageCollectOldRevisions deletes archived revisions beyond ClusterExtensionRevisionRetentionLimit. // Active revisions are never deleted. revisionList must be sorted oldest to newest. 
func (bc *Boxcutter) garbageCollectOldRevisions(ctx context.Context, revisionList []ocv1.ClusterExtensionRevision) error { @@ -445,3 +471,43 @@ func splitManifestDocuments(file string) []string { } return docs } + +// getObjects returns a slice of all objects in the revision +func getObjects(rev *ocv1.ClusterExtensionRevision) []client.Object { + var objs []client.Object + for _, phase := range rev.Spec.Phases { + for _, phaseObject := range phase.Objects { + objs = append(objs, &phaseObject.Object) + } + } + return objs +} + +// revisionManifestReader returns an io.Reader containing all manifests in the revision +func revisionManifestReader(rev *ocv1.ClusterExtensionRevision) (io.Reader, error) { + printer := printers.YAMLPrinter{} + buf := new(bytes.Buffer) + for _, obj := range getObjects(rev) { + buf.WriteString("---\n") + if err := printer.PrintObj(obj, buf); err != nil { + return nil, err + } + } + return buf, nil +} + +func revisionManagementPerms(rev *ocv1.ClusterExtensionRevision) func(user.Info) []authorizer.AttributesRecord { + return func(user user.Info) []authorizer.AttributesRecord { + return []authorizer.AttributesRecord{ + { + User: user, + Name: rev.Name, + APIGroup: ocv1.GroupVersion.Group, + APIVersion: ocv1.GroupVersion.Version, + Resource: "clusterextensionrevisions/finalizers", + ResourceRequest: true, + Verb: "update", + }, + } + } +} diff --git a/internal/operator-controller/applier/boxcutter_test.go b/internal/operator-controller/applier/boxcutter_test.go index 081747285..130a6057b 100644 --- a/internal/operator-controller/applier/boxcutter_test.go +++ b/internal/operator-controller/applier/boxcutter_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io" "io/fs" "strings" "testing" @@ -16,18 +17,23 @@ import ( "helm.sh/helm/v3/pkg/storage/driver" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" k8scheme "k8s.io/client-go/kubernetes/scheme" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/client/interceptor" ocv1 "github.com/operator-framework/operator-controller/api/v1" "github.com/operator-framework/operator-controller/internal/operator-controller/applier" + "github.com/operator-framework/operator-controller/internal/operator-controller/authorization" "github.com/operator-framework/operator-controller/internal/operator-controller/labels" ) @@ -327,6 +333,65 @@ func Test_SimpleRevisionGenerator_AppliesObjectLabelsAndRevisionAnnotations(t *t require.Equal(t, revAnnotations, rev.Annotations) } +func Test_SimpleRevisionGenerator_PropagatesProgressDeadlineMinutes(t *testing.T) { + r := &FakeManifestProvider{ + GetFn: func(b fs.FS, e *ocv1.ClusterExtension) ([]client.Object, error) { + return []client.Object{}, nil + }, + } + + b := applier.SimpleRevisionGenerator{ + Scheme: k8scheme.Scheme, + ManifestProvider: r, + } + + type args struct { + progressDeadlineMinutes *int32 + } + type want struct { + progressDeadlineMinutes int32 + } + type testCase struct { + args args + want want + } + for name, tc := range map[string]testCase{ + "propagates when set": { + args: args{ + progressDeadlineMinutes: 
ptr.To(int32(10)), + }, + want: want{ + progressDeadlineMinutes: 10, + }, + }, + "do not propagate when unset": { + want: want{ + progressDeadlineMinutes: 0, + }, + }, + } { + ext := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-extension", + }, + Spec: ocv1.ClusterExtensionSpec{ + Namespace: "test-namespace", + ServiceAccount: ocv1.ServiceAccountReference{Name: "test-sa"}, + }, + } + empty := map[string]string{} + t.Run(name, func(t *testing.T) { + if pd := tc.args.progressDeadlineMinutes; pd != nil { + ext.Spec.ProgressDeadlineMinutes = *pd + } + + rev, err := b.GenerateRevision(t.Context(), fstest.MapFS{}, ext, empty, empty) + require.NoError(t, err) + require.Equal(t, tc.want.progressDeadlineMinutes, rev.Spec.ProgressDeadlineMinutes) + }) + } +} + func Test_SimpleRevisionGenerator_Failure(t *testing.T) { r := &FakeManifestProvider{ GetFn: func(b fs.FS, e *ocv1.ClusterExtension) ([]client.Object, error) { @@ -948,6 +1013,191 @@ func TestBoxcutter_Apply(t *testing.T) { } } +func Test_PreAuthorizer_Integration(t *testing.T) { + testScheme := runtime.NewScheme() + require.NoError(t, ocv1.AddToScheme(testScheme)) + + // This is the revision that the mock builder will produce by default. + // We calculate its hash to use in the tests. + ext := &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ext", + UID: "test-uid", + }, + Spec: ocv1.ClusterExtensionSpec{ + Namespace: "test-namespace", + ServiceAccount: ocv1.ServiceAccountReference{ + Name: "test-sa", + }, + }, + } + fakeClient := fake.NewClientBuilder().WithScheme(testScheme).Build() + dummyGenerator := &mockBundleRevisionBuilder{ + makeRevisionFunc: func(ctx context.Context, bundleFS fs.FS, ext *ocv1.ClusterExtension, objectLabels, revisionAnnotation map[string]string) (*ocv1.ClusterExtensionRevision, error) { + return &ocv1.ClusterExtensionRevision{ + Spec: ocv1.ClusterExtensionRevisionSpec{ + Phases: []ocv1.ClusterExtensionRevisionPhase{ + { + Name: "some-phase", + Objects: []ocv1.ClusterExtensionRevisionObject{ + { + Object: unstructured.Unstructured{ + Object: map[string]interface{}{ + "apiVersion": "v1", + "kind": "ConfigMap", + "data": map[string]string{ + "test-data": "test-data", + }, + }, + }, + }, + }, + }, + }, + }, + }, nil + }, + } + dummyBundleFs := fstest.MapFS{} + revisionAnnotations := map[string]string{} + + for _, tc := range []struct { + name string + preAuthorizer func(t *testing.T) authorization.PreAuthorizer + validate func(t *testing.T, err error) + }{ + { + name: "preauthorizer called with correct parameters", + preAuthorizer: func(t *testing.T) authorization.PreAuthorizer { + return &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + require.Equal(t, "system:serviceaccount:test-namespace:test-sa", user.GetName()) + require.Empty(t, user.GetUID()) + require.Nil(t, user.GetExtra()) + require.Empty(t, user.GetGroups()) + + t.Log("has correct additional permissions") + require.Len(t, additionalRequiredPerms, 1) + perms := additionalRequiredPerms[0](user) + + require.Len(t, perms, 1) + require.Equal(t, authorizer.AttributesRecord{ + User: user, + Name: "test-ext-1", + APIGroup: "olm.operatorframework.io", + APIVersion: "v1", + Resource: "clusterextensionrevisions/finalizers", + ResourceRequest: true, + Verb: "update", + }, perms[0]) + + t.Log("has correct manifest reader") + manifests, err := io.ReadAll(reader) + 
require.NoError(t, err) + require.Equal(t, "---\napiVersion: v1\ndata:\n test-data: test-data\nkind: ConfigMap\n", string(manifests)) + return nil, nil + }, + } + }, + }, { + name: "preauthorizer errors are returned", + preAuthorizer: func(t *testing.T) authorization.PreAuthorizer { + return &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return nil, errors.New("test error") + }, + } + }, + validate: func(t *testing.T, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "pre-authorization failed") + require.Contains(t, err.Error(), "authorization evaluation error: test error") + }, + }, { + name: "preauthorizer missing permissions are returned as an error", + preAuthorizer: func(t *testing.T) authorization.PreAuthorizer { + return &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return []authorization.ScopedPolicyRules{ + { + Namespace: "", + MissingRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + }, + }, nil + }, + } + }, + validate: func(t *testing.T, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "pre-authorization failed") + require.Contains(t, err.Error(), "service account requires the following permissions") + require.Contains(t, err.Error(), "Resources:[pods]") + require.Contains(t, err.Error(), "Verbs:[get,list,watch]") + }, + }, { + name: "preauthorizer missing permissions and errors are combined and returned as an error", + preAuthorizer: func(t *testing.T) authorization.PreAuthorizer { + return &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return []authorization.ScopedPolicyRules{ + { + Namespace: "", + MissingRules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + }, + }, errors.New("test error") + }, + } + }, + validate: func(t *testing.T, err error) { + require.Error(t, err) + require.Contains(t, err.Error(), "pre-authorization failed") + require.Contains(t, err.Error(), "service account requires the following permissions") + require.Contains(t, err.Error(), "Resources:[pods]") + require.Contains(t, err.Error(), "Verbs:[get,list,watch]") + require.Contains(t, err.Error(), "authorization evaluation error: test error") + }, + }, { + name: "successful call to preauthorizer does not block applier", + preAuthorizer: func(t *testing.T) authorization.PreAuthorizer { + return &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return nil, nil + }, + } + }, + validate: func(t *testing.T, err error) { + require.NoError(t, err) + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + boxcutter := &applier.Boxcutter{ + Client: fakeClient, + Scheme: testScheme, + FieldOwner: "test-owner", + RevisionGenerator: dummyGenerator, + PreAuthorizer: tc.preAuthorizer(t), + } + err := boxcutter.Apply(t.Context(), dummyBundleFs, ext, nil, 
revisionAnnotations) + if tc.validate != nil { + tc.validate(t, err) + } + }) + } +} + func TestBoxcutterStorageMigrator(t *testing.T) { t.Run("creates revision", func(t *testing.T) { testScheme := runtime.NewScheme() diff --git a/internal/operator-controller/applier/helm.go b/internal/operator-controller/applier/helm.go index 22ed096fe..86b5440bc 100644 --- a/internal/operator-controller/applier/helm.go +++ b/internal/operator-controller/applier/helm.go @@ -19,6 +19,8 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" apimachyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" @@ -77,29 +79,8 @@ func (h *Helm) runPreAuthorizationChecks(ctx context.Context, ext *ocv1.ClusterE return fmt.Errorf("error rendering content for pre-authorization checks: %w", err) } - missingRules, authErr := h.PreAuthorizer.PreAuthorize(ctx, ext, strings.NewReader(tmplRel.Manifest)) - - var preAuthErrors []error - - if len(missingRules) > 0 { - var missingRuleDescriptions []string - for _, policyRules := range missingRules { - for _, rule := range policyRules.MissingRules { - missingRuleDescriptions = append(missingRuleDescriptions, ruleDescription(policyRules.Namespace, rule)) - } - } - slices.Sort(missingRuleDescriptions) - // This phrase is explicitly checked by external testing - preAuthErrors = append(preAuthErrors, fmt.Errorf("service account requires the following permissions to manage cluster extension:\n %s", strings.Join(missingRuleDescriptions, "\n "))) - } - if authErr != nil { - preAuthErrors = append(preAuthErrors, fmt.Errorf("authorization evaluation error: %w", authErr)) - } - if len(preAuthErrors) > 0 { - // This phrase is explicitly checked by external testing - return fmt.Errorf("pre-authorization failed: %v", errors.Join(preAuthErrors...)) - } - return nil + manifestManager := getUserInfo(ext) + return formatPreAuthorizerOutput(h.PreAuthorizer.PreAuthorize(ctx, manifestManager, strings.NewReader(tmplRel.Manifest), extManagementPerms(ext))) } func (h *Helm) Apply(ctx context.Context, contentFS fs.FS, ext *ocv1.ClusterExtension, objectLabels map[string]string, storageLabels map[string]string) (bool, string, error) { @@ -328,3 +309,48 @@ func ruleDescription(ns string, rule rbacv1.PolicyRule) string { } return sb.String() } + +// formatPreAuthorizerOutput formats the output of PreAuthorizer.PreAuthorize calls into a consistent and deterministic error. +// If the call returns no missing rules, and no error, nil is returned. 
+func formatPreAuthorizerOutput(missingRules []authorization.ScopedPolicyRules, authErr error) error { + var preAuthErrors []error + if len(missingRules) > 0 { + var missingRuleDescriptions []string + for _, policyRules := range missingRules { + for _, rule := range policyRules.MissingRules { + missingRuleDescriptions = append(missingRuleDescriptions, ruleDescription(policyRules.Namespace, rule)) + } + } + slices.Sort(missingRuleDescriptions) + // This phrase is explicitly checked by external testing + preAuthErrors = append(preAuthErrors, fmt.Errorf("service account requires the following permissions to manage cluster extension:\n %s", strings.Join(missingRuleDescriptions, "\n "))) + } + if authErr != nil { + preAuthErrors = append(preAuthErrors, fmt.Errorf("authorization evaluation error: %w", authErr)) + } + if len(preAuthErrors) > 0 { + // This phrase is explicitly checked by external testing + return fmt.Errorf("pre-authorization failed: %v", errors.Join(preAuthErrors...)) + } + return nil +} + +func getUserInfo(ext *ocv1.ClusterExtension) user.Info { + return &user.DefaultInfo{Name: fmt.Sprintf("system:serviceaccount:%s:%s", ext.Spec.Namespace, ext.Spec.ServiceAccount.Name)} +} + +func extManagementPerms(ext *ocv1.ClusterExtension) func(user.Info) []authorizer.AttributesRecord { + return func(user user.Info) []authorizer.AttributesRecord { + return []authorizer.AttributesRecord{ + { + User: user, + Name: ext.Name, + APIGroup: ocv1.GroupVersion.Group, + APIVersion: ocv1.GroupVersion.Version, + Resource: "clusterextensions/finalizers", + ResourceRequest: true, + Verb: "update", + }, + } + } +} diff --git a/internal/operator-controller/applier/helm_test.go b/internal/operator-controller/applier/helm_test.go index 8d07de912..a1332812d 100644 --- a/internal/operator-controller/applier/helm_test.go +++ b/internal/operator-controller/applier/helm_test.go @@ -15,6 +15,9 @@ import ( "helm.sh/helm/v3/pkg/release" "helm.sh/helm/v3/pkg/storage/driver" rbacv1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" "sigs.k8s.io/controller-runtime/pkg/client" helmclient "github.com/operator-framework/helm-operator-plugins/pkg/client" @@ -70,16 +73,11 @@ type mockPreflight struct { } type mockPreAuthorizer struct { - missingRules []authorization.ScopedPolicyRules - returnError error + fn func(context.Context, user.Info, io.Reader, ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) } -func (p *mockPreAuthorizer) PreAuthorize( - ctx context.Context, - ext *ocv1.ClusterExtension, - manifestReader io.Reader, -) ([]authorization.ScopedPolicyRules, error) { - return p.missingRules, p.returnError +func (p *mockPreAuthorizer) PreAuthorize(ctx context.Context, manifestManager user.Info, manifestReader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return p.fn(ctx, manifestManager, manifestReader, additionalRequiredPerms...) 
} func (mp *mockPreflight) Install(context.Context, []client.Object) error { @@ -193,7 +191,17 @@ metadata: spec: clusterIP: 0.0.0.0` - testCE = &ocv1.ClusterExtension{} + testCE = &ocv1.ClusterExtension{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-ext", + }, + Spec: ocv1.ClusterExtensionSpec{ + Namespace: "test-namespace", + ServiceAccount: ocv1.ServiceAccountReference{ + Name: "test-sa", + }, + }, + } testObjectLabels = map[string]string{"object": "label"} testStorageLabels = map[string]string{"storage": "label"} errPreAuth = errors.New("problem running preauthorization") @@ -345,6 +353,52 @@ func TestApply_Installation(t *testing.T) { } func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { + t.Run("preauthorizer called with correct parameters", func(t *testing.T) { + mockAcg := &mockActionGetter{ + getClientErr: driver.ErrReleaseNotFound, + installErr: errors.New("failed installing chart"), + desiredRel: &release.Release{ + Info: &release.Info{Status: release.StatusDeployed}, + Manifest: validManifest, + }, + } + mockPf := &mockPreflight{installErr: errors.New("failed during install pre-flight check")} + helmApplier := applier.Helm{ + ActionClientGetter: mockAcg, + Preflights: []applier.Preflight{mockPf}, + PreAuthorizer: &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + t.Log("has correct user") + require.Equal(t, "system:serviceaccount:test-namespace:test-sa", user.GetName()) + require.Empty(t, user.GetUID()) + require.Nil(t, user.GetExtra()) + require.Empty(t, user.GetGroups()) + + t.Log("has correct additional permissions") + require.Len(t, additionalRequiredPerms, 1) + perms := additionalRequiredPerms[0](user) + + require.Len(t, perms, 1) + require.Equal(t, authorizer.AttributesRecord{ + User: user, + Name: "test-ext", + APIGroup: "olm.operatorframework.io", + APIVersion: "v1", + Resource: "clusterextensions/finalizers", + ResourceRequest: true, + Verb: "update", + }, perms[0]) + return nil, nil + }, + }, + HelmChartProvider: DummyHelmChartProvider, + HelmReleaseToObjectsConverter: mockHelmReleaseToObjectsConverter{}, + } + + _, _, err := helmApplier.Apply(context.TODO(), validFS, testCE, testObjectLabels, testStorageLabels) + require.Error(t, err) + }) + t.Run("fails during dry-run installation", func(t *testing.T) { mockAcg := &mockActionGetter{ getClientErr: driver.ErrReleaseNotFound, @@ -373,9 +427,13 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { } mockPf := &mockPreflight{installErr: errors.New("failed during install pre-flight check")} helmApplier := applier.Helm{ - ActionClientGetter: mockAcg, - Preflights: []applier.Preflight{mockPf}, - PreAuthorizer: &mockPreAuthorizer{nil, nil}, + ActionClientGetter: mockAcg, + Preflights: []applier.Preflight{mockPf}, + PreAuthorizer: &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return nil, nil + }, + }, HelmChartProvider: DummyHelmChartProvider, HelmReleaseToObjectsConverter: mockHelmReleaseToObjectsConverter{}, } @@ -397,8 +455,12 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { } helmApplier := applier.Helm{ ActionClientGetter: mockAcg, - PreAuthorizer: &mockPreAuthorizer{nil, errPreAuth}, - HelmChartProvider: DummyHelmChartProvider, + 
PreAuthorizer: &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return nil, errPreAuth + }, + }, + HelmChartProvider: DummyHelmChartProvider, } // Use a ClusterExtension with valid Spec fields. validCE := &ocv1.ClusterExtension{ @@ -426,8 +488,12 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { } helmApplier := applier.Helm{ ActionClientGetter: mockAcg, - PreAuthorizer: &mockPreAuthorizer{missingRBAC, nil}, - HelmChartProvider: DummyHelmChartProvider, + PreAuthorizer: &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return missingRBAC, nil + }, + }, + HelmChartProvider: DummyHelmChartProvider, } // Use a ClusterExtension with valid Spec fields. validCE := &ocv1.ClusterExtension{ @@ -454,8 +520,12 @@ func TestApply_InstallationWithPreflightPermissionsEnabled(t *testing.T) { }, } helmApplier := applier.Helm{ - ActionClientGetter: mockAcg, - PreAuthorizer: &mockPreAuthorizer{nil, nil}, + ActionClientGetter: mockAcg, + PreAuthorizer: &mockPreAuthorizer{ + fn: func(ctx context.Context, user user.Info, reader io.Reader, additionalRequiredPerms ...authorization.UserAuthorizerAttributesFactory) ([]authorization.ScopedPolicyRules, error) { + return nil, nil + }, + }, HelmChartProvider: DummyHelmChartProvider, HelmReleaseToObjectsConverter: mockHelmReleaseToObjectsConverter{}, Manager: &mockManagedContentCacheManager{ diff --git a/internal/operator-controller/authorization/rbac.go b/internal/operator-controller/authorization/rbac.go index 357268615..54662bb08 100644 --- a/internal/operator-controller/authorization/rbac.go +++ b/internal/operator-controller/authorization/rbac.go @@ -31,12 +31,25 @@ import ( "k8s.io/kubernetes/pkg/registry/rbac/validation" rbac "k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac" "sigs.k8s.io/controller-runtime/pkg/client" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" ) +// UserAuthorizerAttributesFactory is a function that produces a slice of AttributesRecord for a user +type UserAuthorizerAttributesFactory func(user user.Info) []authorizer.AttributesRecord + type PreAuthorizer interface { - PreAuthorize(ctx context.Context, ext *ocv1.ClusterExtension, manifestReader io.Reader) ([]ScopedPolicyRules, error) + // PreAuthorize validates whether the user satisfies the necessary permissions + // as defined by the RBAC policy. It examines the user’s roles, resource identifiers, and + // the intended action to determine if the operation is allowed. Optional additional required permissions are also evaluated + // against the user. + // + // Return Value: + // - nil: indicates that the authorization check passed and the operation is permitted. + // - non-nil error: indicates that an error occurred during the permission evaluation process + // (for example, a failure decoding the manifest or other internal issues). If the evaluation + // completes successfully but identifies missing rules, then a nil error is returned along with + // the list (or slice) of missing rules.
Note that in some cases the error may encapsulate multiple + // evaluation failures + PreAuthorize(ctx context.Context, user user.Info, manifestReader io.Reader, additionalRequiredPerms ...UserAuthorizerAttributesFactory) ([]ScopedPolicyRules, error) } type ScopedPolicyRules struct { @@ -68,24 +81,19 @@ func NewRBACPreAuthorizer(cl client.Client) PreAuthorizer { } } -// PreAuthorize validates whether the current user/request satisfies the necessary permissions -// as defined by the RBAC policy. It examines the user’s roles, resource identifiers, and -// the intended action to determine if the operation is allowed. -// -// Return Value: -// - nil: indicates that the authorization check passed and the operation is permitted. -// - non-nil error: indicates that an error occurred during the permission evaluation process -// (for example, a failure decoding the manifest or other internal issues). If the evaluation -// completes successfully but identifies missing rules, then a nil error is returned along with -// the list (or slice) of missing rules. Note that in some cases the error may encapsulate multiple -// evaluation failures -func (a *rbacPreAuthorizer) PreAuthorize(ctx context.Context, ext *ocv1.ClusterExtension, manifestReader io.Reader) ([]ScopedPolicyRules, error) { +func (a *rbacPreAuthorizer) PreAuthorize(ctx context.Context, user user.Info, manifestReader io.Reader, additionalRequiredPerms ...UserAuthorizerAttributesFactory) ([]ScopedPolicyRules, error) { dm, err := a.decodeManifest(manifestReader) if err != nil { return nil, err } - manifestManager := &user.DefaultInfo{Name: fmt.Sprintf("system:serviceaccount:%s:%s", ext.Spec.Namespace, ext.Spec.ServiceAccount.Name)} - attributesRecords := dm.asAuthorizationAttributesRecordsForUser(manifestManager, ext) + + // derive manifest related attributes records + attributesRecords := dm.asAuthorizationAttributesRecordsForUser(user) + + // append additional required perms + for _, fn := range additionalRequiredPerms { + attributesRecords = append(attributesRecords, fn(user)...) + } var preAuthEvaluationErrors []error missingRules, err := a.authorizeAttributesRecords(ctx, attributesRecords) @@ -97,7 +105,7 @@ func (a *rbacPreAuthorizer) PreAuthorize(ctx context.Context, ext *ocv1.ClusterE var parseErrors []error for _, obj := range dm.rbacObjects() { - if err := ec.checkEscalation(ctx, manifestManager, obj); err != nil { + if err := ec.checkEscalation(ctx, user, obj); err != nil { result, err := parseEscalationErrorForMissingRules(err) missingRules[obj.GetNamespace()] = append(missingRules[obj.GetNamespace()], result.MissingRules...) 
preAuthEvaluationErrors = append(preAuthEvaluationErrors, result.ResolutionErrors) @@ -316,7 +324,7 @@ func (dm *decodedManifest) rbacObjects() []client.Object { return objects } -func (dm *decodedManifest) asAuthorizationAttributesRecordsForUser(manifestManager user.Info, ext *ocv1.ClusterExtension) []authorizer.AttributesRecord { +func (dm *decodedManifest) asAuthorizationAttributesRecordsForUser(manifestManager user.Info) []authorizer.AttributesRecord { var attributeRecords []authorizer.AttributesRecord for gvr, keys := range dm.gvrs { @@ -363,18 +371,6 @@ func (dm *decodedManifest) asAuthorizationAttributesRecordsForUser(manifestManag Verb: v, }) } - - for _, verb := range []string{"update"} { - attributeRecords = append(attributeRecords, authorizer.AttributesRecord{ - User: manifestManager, - Name: ext.Name, - APIGroup: ext.GroupVersionKind().Group, - APIVersion: ext.GroupVersionKind().Version, - Resource: "clusterextensions/finalizers", - ResourceRequest: true, - Verb: verb, - }) - } } return attributeRecords } diff --git a/internal/operator-controller/authorization/rbac_test.go b/internal/operator-controller/authorization/rbac_test.go index 8e3d47687..9d80cc52b 100644 --- a/internal/operator-controller/authorization/rbac_test.go +++ b/internal/operator-controller/authorization/rbac_test.go @@ -14,12 +14,11 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apiserver/pkg/authentication/user" + "k8s.io/apiserver/pkg/authorization/authorizer" "k8s.io/apiserver/pkg/endpoints/request" "k8s.io/kubernetes/pkg/registry/rbac/validation" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - - ocv1 "github.com/operator-framework/operator-controller/api/v1" ) var ( @@ -129,17 +128,9 @@ subjects: namespace: a-test-namespace ` - saName = "test-serviceaccount" - ns = "test-namespace" - exampleClusterExtension = ocv1.ClusterExtension{ - ObjectMeta: metav1.ObjectMeta{Name: "test-cluster-extension"}, - Spec: ocv1.ClusterExtensionSpec{ - Namespace: ns, - ServiceAccount: ocv1.ServiceAccountReference{ - Name: saName, - }, - }, - } + saName = "test-serviceaccount" + ns = "test-namespace" + testUser = &user.DefaultInfo{Name: fmt.Sprintf("system:serviceaccount:%s:%s", ns, saName)} objects = []client.Object{ &corev1.Namespace{ @@ -204,7 +195,7 @@ subjects: Rules: []rbacv1.PolicyRule{ { APIGroups: []string{"*"}, - Resources: []string{"serviceaccounts", "services", "clusterextensions/finalizers"}, + Resources: []string{"serviceaccounts", "services"}, Verbs: []string{"*"}, }, { @@ -236,12 +227,6 @@ subjects: APIGroups: []string{"rbac.authorization.k8s.io"}, Resources: []string{"roles"}, ResourceNames: []string(nil), - NonResourceURLs: []string(nil)}, - { - Verbs: []string{"update"}, - APIGroups: []string{""}, - Resources: []string{"clusterextensions/finalizers"}, - ResourceNames: []string{"test-cluster-extension"}, NonResourceURLs: []string(nil), }, }, @@ -310,12 +295,6 @@ subjects: APIGroups: []string{"rbac.authorization.k8s.io"}, Resources: []string{"roles"}, ResourceNames: []string(nil), - NonResourceURLs: []string(nil)}, - { - Verbs: []string{"update"}, - APIGroups: []string{""}, - Resources: []string{"clusterextensions/finalizers"}, - ResourceNames: []string{"test-cluster-extension"}, NonResourceURLs: []string(nil), }, }, @@ -418,7 +397,7 @@ func TestPreAuthorize_Success(t *testing.T) { t.Run("preauthorize succeeds with no missing rbac rules", func(t *testing.T) { fakeClient := setupFakeClient(privilegedClusterRole) 
preAuth := NewRBACPreAuthorizer(fakeClient) - missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + missingRules, err := preAuth.PreAuthorize(context.TODO(), testUser, strings.NewReader(testManifest)) require.NoError(t, err) require.Equal(t, []ScopedPolicyRules{}, missingRules) }) @@ -428,7 +407,7 @@ func TestPreAuthorize_MissingRBAC(t *testing.T) { t.Run("preauthorize fails and finds missing rbac rules", func(t *testing.T) { fakeClient := setupFakeClient(limitedClusterRole) preAuth := NewRBACPreAuthorizer(fakeClient) - missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + missingRules, err := preAuth.PreAuthorize(context.TODO(), testUser, strings.NewReader(testManifest)) require.NoError(t, err) require.Equal(t, expectedSingleNamespaceMissingRules, missingRules) }) @@ -438,7 +417,7 @@ func TestPreAuthorizeMultiNamespace_MissingRBAC(t *testing.T) { t.Run("preauthorize fails and finds missing rbac rules in multiple namespaces", func(t *testing.T) { fakeClient := setupFakeClient(limitedClusterRole) preAuth := NewRBACPreAuthorizer(fakeClient) - missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifestMultiNamespace)) + missingRules, err := preAuth.PreAuthorize(context.TODO(), testUser, strings.NewReader(testManifestMultiNamespace)) require.NoError(t, err) require.Equal(t, expectedMultiNamespaceMissingRules, missingRules) }) @@ -448,12 +427,44 @@ func TestPreAuthorize_CheckEscalation(t *testing.T) { t.Run("preauthorize succeeds with no missing rbac rules", func(t *testing.T) { fakeClient := setupFakeClient(escalatingClusterRole) preAuth := NewRBACPreAuthorizer(fakeClient) - missingRules, err := preAuth.PreAuthorize(context.TODO(), &exampleClusterExtension, strings.NewReader(testManifest)) + missingRules, err := preAuth.PreAuthorize(context.TODO(), testUser, strings.NewReader(testManifest)) require.NoError(t, err) require.Equal(t, []ScopedPolicyRules{}, missingRules) }) } +func TestPreAuthorize_AdditionalRequiredPerms_MissingRBAC(t *testing.T) { + t.Run("preauthorize fails and finds missing rbac rules coming from the additional required permissions", func(t *testing.T) { + fakeClient := setupFakeClient(escalatingClusterRole) + preAuth := NewRBACPreAuthorizer(fakeClient) + missingRules, err := preAuth.PreAuthorize(context.TODO(), testUser, strings.NewReader(testManifest), func(user user.Info) []authorizer.AttributesRecord { + return []authorizer.AttributesRecord{ + { + User: user, + Verb: "create", + APIGroup: corev1.SchemeGroupVersion.Group, + APIVersion: corev1.SchemeGroupVersion.Version, + Resource: "pods", + ResourceRequest: true, + }, + } + }) + require.NoError(t, err) + require.Equal(t, []ScopedPolicyRules{ + { + Namespace: "", + MissingRules: []rbacv1.PolicyRule{ + { + Verbs: []string{"create"}, + APIGroups: []string{""}, + Resources: []string{"pods"}, + }, + }, + }, + }, missingRules) + }) +} + // TestParseEscalationErrorForMissingRules Are tests with respect to https://github.com/kubernetes/api/blob/e8d4d542f6a9a16a694bfc8e3b8cd1557eecfc9d/rbac/v1/types.go#L49-L74 // Goal is: prove the regex works as planned AND that if the error messages ever change we'll learn about it with these tests func TestParseEscalationErrorForMissingRules_ParsingLogic(t *testing.T) { diff --git a/internal/operator-controller/conditionsets/conditionsets.go b/internal/operator-controller/conditionsets/conditionsets.go index 
6c33b1c8f..e72a95c2a 100644 --- a/internal/operator-controller/conditionsets/conditionsets.go +++ b/internal/operator-controller/conditionsets/conditionsets.go @@ -41,4 +41,5 @@ var ConditionReasons = []string{ ocv1.ReasonRetrying, ocv1.ReasonAbsent, ocv1.ReasonRollingOut, + ocv1.ReasonProgressDeadlineExceeded, } diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller.go b/internal/operator-controller/controllers/clusterextensionrevision_controller.go index a2173a2fc..138bcd8eb 100644 --- a/internal/operator-controller/controllers/clusterextensionrevision_controller.go +++ b/internal/operator-controller/controllers/clusterextensionrevision_controller.go @@ -8,6 +8,7 @@ import ( "errors" "fmt" "strings" + "sync" "time" appsv1 "k8s.io/api/apps/v1" @@ -27,6 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/builder" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/predicate" @@ -46,6 +48,9 @@ type ClusterExtensionRevisionReconciler struct { Client client.Client RevisionEngineFactory RevisionEngineFactory TrackingCache trackingCache + // tracks whether we have already queued the reconcile that detects progress deadline expiry + // key is the revision UID, value is a boolean + progressDeadlineCheckInFlight sync.Map } type trackingCache interface { @@ -74,6 +79,25 @@ func (c *ClusterExtensionRevisionReconciler) Reconcile(ctx context.Context, req reconciledRev := existingRev.DeepCopy() res, reconcileErr := c.reconcile(ctx, reconciledRev) + if pd := existingRev.Spec.ProgressDeadlineMinutes; pd > 0 { + cnd := meta.FindStatusCondition(reconciledRev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) + isStillProgressing := cnd != nil && cnd.Status == metav1.ConditionTrue && cnd.Reason != ocv1.ReasonSucceeded + succeeded := meta.IsStatusConditionTrue(reconciledRev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeSucceeded) + // check if we reached the progress deadline only if the revision is still progressing and has not succeeded yet + if isStillProgressing && !succeeded { + timeout := time.Duration(pd) * time.Minute + if time.Since(existingRev.CreationTimestamp.Time) > timeout { + // progress deadline reached, reset any errors and stop reconciling this revision + markAsNotProgressing(reconciledRev, ocv1.ReasonProgressDeadlineExceeded, fmt.Sprintf("Revision has not rolled out for %d minutes.", pd)) + reconcileErr = nil + res = ctrl.Result{} + } else if _, found := c.progressDeadlineCheckInFlight.Load(existingRev.GetUID()); !found && reconcileErr == nil { + // if we haven't already queued up a reconcile to check for progress deadline, queue one up, but only once + c.progressDeadlineCheckInFlight.Store(existingRev.GetUID(), true) + res = ctrl.Result{RequeueAfter: timeout} + } + } + } // Do checks before any Update()s, as Update() may modify the resource structure!
updateStatus := !equality.Semantic.DeepEqual(existingRev.Status, reconciledRev.Status) @@ -117,7 +141,7 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev } if !rev.DeletionTimestamp.IsZero() || rev.Spec.LifecycleState == ocv1.ClusterExtensionRevisionLifecycleStateArchived { - return c.teardown(ctx, rev, revision) + return c.teardown(ctx, rev) } revVersion := rev.GetAnnotations()[labels.BundleVersionKey] @@ -251,33 +275,7 @@ func (c *ClusterExtensionRevisionReconciler) reconcile(ctx context.Context, rev return ctrl.Result{}, nil } -func (c *ClusterExtensionRevisionReconciler) teardown(ctx context.Context, rev *ocv1.ClusterExtensionRevision, revision *boxcutter.Revision) (ctrl.Result, error) { - l := log.FromContext(ctx) - - revisionEngine, err := c.RevisionEngineFactory.CreateRevisionEngine(ctx, rev) - if err != nil { - markAsAvailableUnknown(rev, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error()) - return ctrl.Result{}, fmt.Errorf("failed to create revision engine for teardown: %v", err) - } - - tres, err := revisionEngine.Teardown(ctx, *revision) - if err != nil { - if tres != nil { - l.V(1).Info("teardown failure report", "report", tres.String()) - } - markAsAvailableUnknown(rev, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error()) - return ctrl.Result{}, fmt.Errorf("revision teardown: %v", err) - } - - // Log detailed teardown reports only in debug mode (V(1)) to reduce verbosity. - l.V(1).Info("teardown report", "report", tres.String()) - if !tres.IsComplete() { - // TODO: If it is not complete, it seems like it would be good to update - // the status in some way to tell the user that the teardown is still - // in progress. - return ctrl.Result{}, nil - } - +func (c *ClusterExtensionRevisionReconciler) teardown(ctx context.Context, rev *ocv1.ClusterExtensionRevision) (ctrl.Result, error) { if err := c.TrackingCache.Free(ctx, rev); err != nil { markAsAvailableUnknown(rev, ocv1.ClusterExtensionRevisionReasonReconciling, err.Error()) return ctrl.Result{}, fmt.Errorf("error stopping informers: %v", err) @@ -299,10 +297,29 @@ type Sourcerer interface { } func (c *ClusterExtensionRevisionReconciler) SetupWithManager(mgr ctrl.Manager) error { + skipProgressDeadlineExceededPredicate := predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + rev, ok := e.ObjectNew.(*ocv1.ClusterExtensionRevision) + if !ok { + return true + } + // allow deletions to happen + if !rev.DeletionTimestamp.IsZero() { + return true + } + if cnd := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing); cnd != nil && cnd.Status == metav1.ConditionFalse && cnd.Reason == ocv1.ReasonProgressDeadlineExceeded { + return false + } + return true + }, + } return ctrl.NewControllerManagedBy(mgr). For( &ocv1.ClusterExtensionRevision{}, - builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + builder.WithPredicates( + predicate.ResourceVersionChangedPredicate{}, + skipProgressDeadlineExceededPredicate, + ), ). 
WatchesRawSource( c.TrackingCache.Source( diff --git a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go index d613da137..f456976f4 100644 --- a/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go +++ b/internal/operator-controller/controllers/clusterextensionrevision_controller_test.go @@ -697,40 +697,6 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { require.True(t, apierrors.IsNotFound(err)) }, }, - { - name: "set Available:Unknown:Reconciling and surface tear down errors when deleted", - revisionResult: mockRevisionResult{}, - existingObjs: func() []client.Object { - ext := newTestClusterExtension() - rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme) - rev1.Finalizers = []string{ - "olm.operatorframework.io/teardown", - } - rev1.DeletionTimestamp = &metav1.Time{Time: time.Now()} - return []client.Object{rev1, ext} - }, - revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) { - return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) { - return nil, fmt.Errorf("some teardown error") - } - }, - expectedErr: "some teardown error", - validate: func(t *testing.T, c client.Client) { - t.Log("cluster revision is not deleted and still contains finalizer") - rev := &ocv1.ClusterExtensionRevision{} - err := c.Get(t.Context(), client.ObjectKey{ - Name: clusterExtensionRevisionName, - }, rev) - require.NoError(t, err) - require.NotContains(t, "olm.operatorframework.io/teardown", rev.Finalizers) - cond := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeAvailable) - require.NotNil(t, cond) - require.Equal(t, metav1.ConditionUnknown, cond.Status) - require.Equal(t, ocv1.ClusterExtensionRevisionReasonReconciling, cond.Reason) - require.Equal(t, "some teardown error", cond.Message) - require.Equal(t, int64(1), cond.ObservedGeneration) - }, - }, { name: "set Available:Unknown:Reconciling and surface tracking cache cleanup errors when deleted", revisionResult: mockRevisionResult{}, @@ -851,32 +817,145 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { require.NotContains(t, rev.Finalizers, "olm.operatorframework.io/teardown") }, }, + } { + t.Run(tc.name, func(t *testing.T) { + // create extension and cluster extension + testClient := fake.NewClientBuilder(). + WithScheme(testScheme). + WithStatusSubresource(&ocv1.ClusterExtensionRevision{}). + WithObjects(tc.existingObjs()...). 
+ Build() + + // reconcile cluster extension revision + mockEngine := &mockRevisionEngine{ + reconcile: func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionReconcileOption) (machinery.RevisionResult, error) { + return tc.revisionResult, nil + }, + teardown: tc.revisionEngineTeardownFn(t), + } + result, err := (&controllers.ClusterExtensionRevisionReconciler{ + Client: testClient, + RevisionEngineFactory: &mockRevisionEngineFactory{engine: mockEngine}, + TrackingCache: &mockTrackingCache{ + client: testClient, + freeFn: tc.trackingCacheFreeFn, + }, + }).Reconcile(t.Context(), ctrl.Request{ + NamespacedName: types.NamespacedName{ + Name: clusterExtensionRevisionName, + }, + }) + + // verify reconcile result and error + require.Equal(t, ctrl.Result{}, result) + if tc.expectedErr != "" { + require.Contains(t, err.Error(), tc.expectedErr) + } else { + require.NoError(t, err) + } + + // validate test case + tc.validate(t, testClient) + }) + } +} + +func Test_ClusterExtensionRevisionReconciler_Reconcile_ProgressDeadline(t *testing.T) { + const ( + clusterExtensionRevisionName = "test-ext-1" + ) + + testScheme := newScheme(t) + require.NoError(t, corev1.AddToScheme(testScheme)) + + for _, tc := range []struct { + name string + existingObjs func() []client.Object + revisionResult machinery.RevisionResult + validate func(*testing.T, client.Client) + reconcileErr error + reconcileResult ctrl.Result + }{ { - name: "surfaces revision teardown error when in archived state", - revisionResult: mockRevisionResult{}, + name: "progressing set to false when progress deadline is exceeded", existingObjs: func() []client.Object { ext := newTestClusterExtension() rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme) - rev1.Finalizers = []string{ - "olm.operatorframework.io/teardown", - } - rev1.Spec.LifecycleState = ocv1.ClusterExtensionRevisionLifecycleStateArchived + rev1.Spec.ProgressDeadlineMinutes = 1 + rev1.CreationTimestamp = metav1.NewTime(time.Now().Add(-61 * time.Second)) return []client.Object{rev1, ext} }, - revisionEngineTeardownFn: func(t *testing.T) func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) { - return func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionTeardownOption) (machinery.RevisionTeardownResult, error) { - return nil, fmt.Errorf("some teardown error") - } + revisionResult: &mockRevisionResult{ + inTransition: true, }, - expectedErr: "some teardown error", validate: func(t *testing.T, c client.Client) { - t.Log("cluster revision is not deleted and still contains finalizer") rev := &ocv1.ClusterExtensionRevision{} err := c.Get(t.Context(), client.ObjectKey{ Name: clusterExtensionRevisionName, }, rev) require.NoError(t, err) - require.NotContains(t, "olm.operatorframework.io/teardown", rev.Finalizers) + cnd := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) + require.Equal(t, metav1.ConditionFalse, cnd.Status) + require.Equal(t, ocv1.ReasonProgressDeadlineExceeded, cnd.Reason) + }, + }, + { + name: "requeue after the progress deadline period for the final progress deadline check", + existingObjs: func() []client.Object { + ext := newTestClusterExtension() + rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme) + rev1.Spec.ProgressDeadlineMinutes = 1 + rev1.CreationTimestamp = metav1.NewTime(time.Now()) + return
[]client.Object{rev1, ext} + }, + revisionResult: &mockRevisionResult{ + inTransition: true, + }, + reconcileResult: ctrl.Result{RequeueAfter: 1 * time.Minute}, + validate: func(t *testing.T, c client.Client) { + rev := &ocv1.ClusterExtensionRevision{} + err := c.Get(t.Context(), client.ObjectKey{ + Name: clusterExtensionRevisionName, + }, rev) + require.NoError(t, err) + cnd := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) + require.Equal(t, metav1.ConditionTrue, cnd.Status) + require.Equal(t, ocv1.ReasonRollingOut, cnd.Reason) + }, + }, + { + name: "no progress deadline checks on revision recovery", + existingObjs: func() []client.Object { + ext := newTestClusterExtension() + rev1 := newTestClusterExtensionRevision(t, clusterExtensionRevisionName, ext, testScheme) + rev1.Spec.ProgressDeadlineMinutes = 1 + rev1.CreationTimestamp = metav1.NewTime(time.Now().Add(-2 * time.Minute)) + meta.SetStatusCondition(&rev1.Status.Conditions, metav1.Condition{ + Type: ocv1.ClusterExtensionRevisionTypeProgressing, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonSucceeded, + ObservedGeneration: rev1.Generation, + }) + meta.SetStatusCondition(&rev1.Status.Conditions, metav1.Condition{ + Type: ocv1.ClusterExtensionRevisionTypeSucceeded, + Status: metav1.ConditionTrue, + Reason: ocv1.ReasonSucceeded, + ObservedGeneration: rev1.Generation, + }) + return []client.Object{rev1, ext} + }, + revisionResult: &mockRevisionResult{ + inTransition: true, + }, + validate: func(t *testing.T, c client.Client) { + rev := &ocv1.ClusterExtensionRevision{} + err := c.Get(t.Context(), client.ObjectKey{ + Name: clusterExtensionRevisionName, + }, rev) + require.NoError(t, err) + cnd := meta.FindStatusCondition(rev.Status.Conditions, ocv1.ClusterExtensionRevisionTypeProgressing) + require.Equal(t, metav1.ConditionTrue, cnd.Status) + require.Equal(t, ocv1.ReasonRollingOut, cnd.Reason) }, }, } { @@ -893,30 +972,21 @@ func Test_ClusterExtensionRevisionReconciler_Reconcile_Deletion(t *testing.T) { reconcile: func(ctx context.Context, rev machinerytypes.Revision, opts ...machinerytypes.RevisionReconcileOption) (machinery.RevisionResult, error) { return tc.revisionResult, nil }, - teardown: tc.revisionEngineTeardownFn(t), } result, err := (&controllers.ClusterExtensionRevisionReconciler{ Client: testClient, RevisionEngineFactory: &mockRevisionEngineFactory{engine: mockEngine}, TrackingCache: &mockTrackingCache{ client: testClient, - freeFn: tc.trackingCacheFreeFn, }, }).Reconcile(t.Context(), ctrl.Request{ NamespacedName: types.NamespacedName{ Name: clusterExtensionRevisionName, }, }) + require.Equal(t, tc.reconcileResult, result) + require.Equal(t, tc.reconcileErr, err) - // reconcile cluster extension revision - require.Equal(t, ctrl.Result{}, result) - if tc.expectedErr != "" { - require.Contains(t, err.Error(), tc.expectedErr) - } else { - require.NoError(t, err) - } - - // validate test case tc.validate(t, testClient) }) } @@ -1307,34 +1377,4 @@ func Test_ClusterExtensionRevisionReconciler_getScopedClient_Errors(t *testing.T require.Contains(t, err.Error(), "failed to create revision engine") require.Contains(t, err.Error(), "token getter failed") }) - - t.Run("factory fails during teardown", func(t *testing.T) { - ext := newTestClusterExtension() - rev := newTestClusterExtensionRevision(t, "test-rev", ext, testScheme) - rev.DeletionTimestamp = &metav1.Time{Time: time.Now()} - rev.Finalizers = []string{"olm.operatorframework.io/teardown"} - - testClient :=
fake.NewClientBuilder(). - WithScheme(testScheme). - WithObjects(ext, rev). - Build() - - failingFactory := &mockRevisionEngineFactory{ - createErr: errors.New("serviceaccount not found"), - } - - reconciler := &controllers.ClusterExtensionRevisionReconciler{ - Client: testClient, - RevisionEngineFactory: failingFactory, - TrackingCache: &mockTrackingCache{client: testClient}, - } - - _, err := reconciler.Reconcile(t.Context(), ctrl.Request{ - NamespacedName: types.NamespacedName{Name: "test-rev"}, - }) - - require.Error(t, err) - require.Contains(t, err.Error(), "failed to create revision engine for teardown") - require.Contains(t, err.Error(), "serviceaccount not found") - }) } diff --git a/manifests/experimental-e2e.yaml b/manifests/experimental-e2e.yaml index 8d56b5ad4..edd046e6a 100644 --- a/manifests/experimental-e2e.yaml +++ b/manifests/experimental-e2e.yaml @@ -778,6 +778,16 @@ spec: x-kubernetes-validations: - message: phases is immutable rule: self == oldSelf || oldSelf.size() == 0 + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer revision: description: |- revision is a required, immutable sequence number representing a specific revision @@ -1052,6 +1062,16 @@ spec: rule: self == oldSelf - message: namespace must be a valid DNS1123 label rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer serviceAccount: description: |- serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster diff --git a/manifests/experimental.yaml b/manifests/experimental.yaml index 324a0fe4c..877552395 100644 --- a/manifests/experimental.yaml +++ b/manifests/experimental.yaml @@ -739,6 +739,16 @@ spec: x-kubernetes-validations: - message: phases is immutable rule: self == oldSelf || oldSelf.size() == 0 + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer revision: description: |- revision is a required, immutable sequence number representing a specific revision @@ -1013,6 +1023,16 @@ spec: rule: self == oldSelf - message: namespace must be a valid DNS1123 label rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. 
This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer serviceAccount: description: |- serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster diff --git a/openshift/operator-controller/manifests-experimental.yaml b/openshift/operator-controller/manifests-experimental.yaml index d785abfd0..4a8151592 100644 --- a/openshift/operator-controller/manifests-experimental.yaml +++ b/openshift/operator-controller/manifests-experimental.yaml @@ -248,6 +248,16 @@ spec: rule: self == oldSelf - message: namespace must be a valid DNS1123 label rule: self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?$") + progressDeadlineMinutes: + description: |- + progressDeadlineMinutes is an optional field that defines the maximum period + of time in minutes after which an installation should be considered failed and + require manual intervention. This functionality is disabled when no value + is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours). + format: int32 + maximum: 720 + minimum: 10 + type: integer serviceAccount: description: |- serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster diff --git a/requirements.txt b/requirements.txt index eedc5ed11..140b20bb4 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ Babel==2.17.0 beautifulsoup4==4.14.3 -certifi==2025.11.12 +certifi==2026.1.4 charset-normalizer==3.4.4 click==8.3.1 colorama==0.4.6 @@ -18,7 +18,7 @@ mkdocs-material==9.7.1 mkdocs-material-extensions==1.3.1 packaging==25.0 paginate==0.5.7 -pathspec==0.12.1 +pathspec==1.0.1 platformdirs==4.5.1 Pygments==2.19.2 pymdown-extensions==10.20 diff --git a/test/e2e/features/install.feature b/test/e2e/features/install.feature index ba59ffe7d..ab87b5f31 100644 --- a/test/e2e/features/install.feature +++ b/test/e2e/features/install.feature @@ -297,3 +297,35 @@ Feature: Install ClusterExtension valid: true mutate: true """ + + @BoxcutterRuntime + @ProgressDeadline + Scenario: Report ClusterExtension as not progressing if the rollout does not complete within the given timeout + Given min value for ClusterExtension .spec.progressDeadlineMinutes is set to 1 + And min value for ClusterExtensionRevision .spec.progressDeadlineMinutes is set to 1 + When ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + progressDeadlineMinutes: 1 + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + version: 1.0.3 + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + Then ClusterExtensionRevision "${NAME}-1" reports Progressing as False with Reason ProgressDeadlineExceeded + And ClusterExtension reports Progressing as False with Reason ProgressDeadlineExceeded and Message: + """ + Revision has not rolled out within 1 minute(s). + """ + And ClusterExtension reports Progressing transition between 1 and 2 minutes since its creation
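The "between 1 and 2 minutes" bound asserted in the final step follows from the requeue strategy shown earlier: the first reconcile runs shortly after creation and schedules the deadline re-check one full deadline period later, so the Progressing transition lands no earlier than the deadline itself and, allowing for reconcile latency, within roughly one extra period. A toy calculation under those assumptions (the timings are illustrative, not measured):

package main

import (
	"fmt"
	"time"
)

func main() {
	const deadline = 1 * time.Minute  // spec.progressDeadlineMinutes: 1
	firstReconcile := 2 * time.Second // shortly after the revision is created
	// The controller requeues after the full deadline period, so the re-check
	// always lands strictly after the deadline itself.
	recheck := firstReconcile + deadline
	fmt.Printf("deadline: %v, re-check at: %v\n", deadline, recheck) // re-check at 1m2s
}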
+ """ + And ClusterExtension reports Progressing transition between 1 and 2 minutes since its creation diff --git a/test/e2e/features/recover.feature b/test/e2e/features/recover.feature index 0438f2d1a..bf500e997 100644 --- a/test/e2e/features/recover.feature +++ b/test/e2e/features/recover.feature @@ -4,7 +4,6 @@ Feature: Recover cluster extension from errors that might occur during its lifet Given OLM is available And ClusterCatalog "test" serves bundles - Scenario: Restore removed resource Given ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} And ClusterExtension is applied @@ -115,3 +114,38 @@ Feature: Recover cluster extension from errors that might occur during its lifet Then ClusterExtension is available And ClusterExtension reports Progressing as True with Reason Succeeded And ClusterExtension reports Installed as True + + @PreflightPermissions + Scenario: ClusterExtension installation succeeds after service account gets the required missing permissions to + manage the bundle's resources + Given ServiceAccount "olm-sa" is available in ${TEST_NAMESPACE} + And ClusterExtension is applied + """ + apiVersion: olm.operatorframework.io/v1 + kind: ClusterExtension + metadata: + name: ${NAME} + spec: + namespace: ${TEST_NAMESPACE} + serviceAccount: + name: olm-sa + source: + sourceType: Catalog + catalog: + packageName: test + selector: + matchLabels: + "olm.operatorframework.io/metadata.name": test-catalog + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message includes: + """ + error for resolved bundle "test-operator.1.2.0" with version "1.2.0": creating new Revision: pre-authorization failed: service account requires the following permissions to manage cluster extension: + """ + And ClusterExtension reports Progressing as True with Reason Retrying and Message includes: + """ + Namespace:"" APIGroups:[apiextensions.k8s.io] Resources:[customresourcedefinitions] ResourceNames:[olme2etests.olm.operatorframework.io] Verbs:[delete,get,patch,update] + """ + When ServiceAccount "olm-sa" with needed permissions is available in ${TEST_NAMESPACE} + Then ClusterExtension is available + And ClusterExtension reports Progressing as True with Reason Succeeded + And ClusterExtension reports Installed as True diff --git a/test/e2e/steps/steps.go b/test/e2e/steps/steps.go index e1a55934d..9ae772ce8 100644 --- a/test/e2e/steps/steps.go +++ b/test/e2e/steps/steps.go @@ -26,6 +26,7 @@ import ( "github.com/stretchr/testify/require" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/utils/ptr" @@ -57,9 +58,11 @@ func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^(?i)ClusterExtension is rolled out$`, ClusterExtensionIsRolledOut) sc.Step(`^(?i)ClusterExtension reports "([^"]+)" as active revision(s?)$`, ClusterExtensionReportsActiveRevisions) sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message:$`, ClusterExtensionReportsCondition) + sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+) and Message includes:$`, ClusterExtensionReportsConditionWithMessageFragment) sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionReportsConditionWithoutMsg) sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) as ([[:alnum:]]+)$`, 
ClusterExtensionReportsConditionWithoutReason) sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" reports ([[:alnum:]]+) as ([[:alnum:]]+) with Reason ([[:alnum:]]+)$`, ClusterExtensionRevisionReportsConditionWithoutMsg) + sc.Step(`^(?i)ClusterExtension reports ([[:alnum:]]+) transition between (\d+) and (\d+) minutes since its creation$`, ClusterExtensionReportsConditionTransitionTime) sc.Step(`^(?i)ClusterExtensionRevision "([^"]+)" is archived$`, ClusterExtensionRevisionIsArchived) sc.Step(`^(?i)resource "([^"]+)" is installed$`, ResourceAvailable) @@ -75,6 +78,7 @@ func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in test namespace$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace) sc.Step(`^(?i)ServiceAccount "([^"]*)" with needed permissions is available in \${TEST_NAMESPACE}$`, ServiceAccountWithNeededPermissionsIsAvailableInNamespace) + sc.Step(`^(?i)ServiceAccount "([^"]*)" is available in \${TEST_NAMESPACE}$`, ServiceAccountIsAvailableInNamespace) sc.Step(`^(?i)ServiceAccount "([^"]*)" in test namespace is cluster admin$`, ServiceAccountWithClusterAdminPermissionsIsAvailableInNamespace) sc.Step(`^(?i)ServiceAccount "([^"]+)" in test namespace has permissions to fetch "([^"]+)" metrics$`, ServiceAccountWithFetchMetricsPermissions) sc.Step(`^(?i)ServiceAccount "([^"]+)" sends request to "([^"]+)" endpoint of "([^"]+)" service$`, SendMetricsRequest) @@ -88,6 +92,8 @@ func RegisterSteps(sc *godog.ScenarioContext) { sc.Step(`^(?i)operator "([^"]+)" target namespace is "([^"]+)"$`, OperatorTargetNamespace) sc.Step(`^(?i)Prometheus metrics are returned in the response$`, PrometheusMetricsAreReturned) + + sc.Step(`^(?i)min value for (ClusterExtension|ClusterExtensionRevision) ((?:\.[a-zA-Z]+)+) is set to (\d+)$`, SetCRDFieldMinValue) } func init() { @@ -169,13 +175,7 @@ func substituteScenarioVars(content string, sc *scenarioContext) string { if v, found := os.LookupEnv("CATALOG_IMG"); found { vars["CATALOG_IMG"] = v } - m := func(k string) string { - if v, found := vars[k]; found { - return v - } - return "" - } - return os.Expand(content, m) + return templateContent(content, vars) } func ResourceApplyFails(ctx context.Context, errMsg string, yamlTemplate *godog.DocString) error { @@ -267,24 +267,28 @@ func waitFor(ctx context.Context, conditionFn func() bool) { require.Eventually(godog.T(ctx), conditionFn, timeout, tick) } -func waitForCondition(ctx context.Context, resourceType, resourceName, conditionType, conditionStatus string, conditionReason *string, msg *string) error { +type msgMatchFn func(string) bool + +func alwaysMatch(_ string) bool { return true } + +func waitForCondition(ctx context.Context, resourceType, resourceName, conditionType, conditionStatus string, conditionReason *string, msgCmp msgMatchFn) error { require.Eventually(godog.T(ctx), func() bool { v, err := k8sClient("get", resourceType, resourceName, "-o", fmt.Sprintf("jsonpath={.status.conditions[?(@.type==\"%s\")]}", conditionType)) if err != nil { return false } - var condition map[string]interface{} + var condition metav1.Condition if err := json.Unmarshal([]byte(v), &condition); err != nil { return false } - if condition["status"] != conditionStatus { + if condition.Status != metav1.ConditionStatus(conditionStatus) { return false } - if conditionReason != nil && condition["reason"] != *conditionReason { + if conditionReason != nil && condition.Reason != *conditionReason { return false } - if msg != nil && condition["message"] 
!= *msg { + if msgCmp != nil && !msgCmp(condition.Message) { return false } @@ -293,17 +297,31 @@ func waitForCondition(ctx context.Context, resourceType, resourceName, condition return nil } -func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msg *string) error { +func waitForExtensionCondition(ctx context.Context, conditionType, conditionStatus string, conditionReason *string, msgCmp msgMatchFn) error { sc := scenarioCtx(ctx) - return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msg) + return waitForCondition(ctx, "clusterextension", sc.clusterExtensionName, conditionType, conditionStatus, conditionReason, msgCmp) } func ClusterExtensionReportsCondition(ctx context.Context, conditionType, conditionStatus, conditionReason string, msg *godog.DocString) error { - var conditionMsg *string + msgCmp := alwaysMatch if msg != nil { - conditionMsg = ptr.To(substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx))) + expectedMsg := substituteScenarioVars(strings.Join(strings.Fields(msg.Content), " "), scenarioCtx(ctx)) + msgCmp = func(actual string) bool { + return actual == expectedMsg + } } - return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, conditionMsg) + return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, msgCmp) +} + +func ClusterExtensionReportsConditionWithMessageFragment(ctx context.Context, conditionType, conditionStatus, conditionReason string, msgFragment *godog.DocString) error { + msgCmp := alwaysMatch + if msgFragment != nil { + expectedMsgFragment := substituteScenarioVars(strings.Join(strings.Fields(msgFragment.Content), " "), scenarioCtx(ctx)) + msgCmp = func(actualMsg string) bool { + return strings.Contains(actualMsg, expectedMsgFragment) + } + } + return waitForExtensionCondition(ctx, conditionType, conditionStatus, &conditionReason, msgCmp) } func ClusterExtensionReportsConditionWithoutMsg(ctx context.Context, conditionType, conditionStatus, conditionReason string) error { @@ -314,6 +332,36 @@ func ClusterExtensionReportsConditionWithoutReason(ctx context.Context, conditio return waitForExtensionCondition(ctx, conditionType, conditionStatus, nil, nil) } +func ClusterExtensionReportsConditionTransitionTime(ctx context.Context, conditionType string, minMinutes, maxMinutes int) error { + sc := scenarioCtx(ctx) + t := godog.T(ctx) + + // Get the ClusterExtension's creation timestamp and condition's lastTransitionTime + v, err := k8sClient("get", "clusterextension", sc.clusterExtensionName, "-o", + fmt.Sprintf("jsonpath={.metadata.creationTimestamp},{.status.conditions[?(@.type==\"%s\")].lastTransitionTime}", conditionType)) + require.NoError(t, err) + + parts := strings.Split(v, ",") + require.Len(t, parts, 2, "expected creationTimestamp and lastTransitionTime but got: %s", v) + + creationTimestamp, err := time.Parse(time.RFC3339, parts[0]) + require.NoError(t, err, "failed to parse creationTimestamp") + + lastTransitionTime, err := time.Parse(time.RFC3339, parts[1]) + require.NoError(t, err, "failed to parse lastTransitionTime") + + transitionDuration := lastTransitionTime.Sub(creationTimestamp) + minDuration := time.Duration(minMinutes) * time.Minute + maxDuration := time.Duration(maxMinutes) * time.Minute + + require.GreaterOrEqual(t, transitionDuration, minDuration, + "condition %s transitioned too early: %v since creation (expected >= 
%v)", conditionType, transitionDuration, minDuration) + require.LessOrEqual(t, transitionDuration, maxDuration, + "condition %s transitioned too late: %v since creation (expected <= %v)", conditionType, transitionDuration, maxDuration) + + return nil +} + func ClusterExtensionReportsActiveRevisions(ctx context.Context, rawRevisionNames string) error { sc := scenarioCtx(ctx) expectedRevisionNames := sets.New[string]() @@ -457,34 +505,45 @@ func ResourceRestored(ctx context.Context, resource string) error { return nil } -func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error { +func applyServiceAccount(ctx context.Context, serviceAccount string) error { sc := scenarioCtx(ctx) - yamlContent, err := os.ReadFile(filepath.Join("steps", "testdata", rbacTemplate)) + vars := extendMap(map[string]string{ + "TEST_NAMESPACE": sc.namespace, + "SERVICE_ACCOUNT_NAME": serviceAccount, + "SERVICEACCOUNT_NAME": serviceAccount, + }) + + yaml, err := templateYaml(filepath.Join("steps", "testdata", "serviceaccount-template.yaml"), vars) if err != nil { - return fmt.Errorf("failed to read RBAC template yaml: %v", err) + return fmt.Errorf("failed to template ServiceAccount yaml: %v", err) } - vars := map[string]string{ + // Apply the ServiceAccount configuration + _, err = k8scliWithInput(yaml, "apply", "-f", "-") + if err != nil { + return fmt.Errorf("failed to apply ServiceAccount configuration: %v: %s", err, stderrOutput(err)) + } + + return nil +} + +func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacTemplate string, keyValue ...string) error { + sc := scenarioCtx(ctx) + if err := applyServiceAccount(ctx, serviceAccount); err != nil { + return err + } + vars := extendMap(map[string]string{ "TEST_NAMESPACE": sc.namespace, "SERVICE_ACCOUNT_NAME": serviceAccount, "SERVICEACCOUNT_NAME": serviceAccount, "CLUSTER_EXTENSION_NAME": sc.clusterExtensionName, "CLUSTEREXTENSION_NAME": sc.clusterExtensionName, - } - if len(keyValue) > 0 { - for i := 0; i < len(keyValue); i += 2 { - vars[keyValue[i]] = keyValue[i+1] - } - } - m := func(k string) string { - if v, found := vars[k]; found { - return v - } - return "" - } + }, keyValue...) - // Replace template variables - yaml := os.Expand(string(yamlContent), m) + yaml, err := templateYaml(filepath.Join("steps", "testdata", rbacTemplate), vars) + if err != nil { + return fmt.Errorf("failed to template RBAC yaml: %v", err) + } // Apply the RBAC configuration _, err = k8scliWithInput(yaml, "apply", "-f", "-") @@ -495,6 +554,10 @@ func applyPermissionsToServiceAccount(ctx context.Context, serviceAccount, rbacT return nil } +func ServiceAccountIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { + return applyServiceAccount(ctx, serviceAccount) +} + func ServiceAccountWithNeededPermissionsIsAvailableInNamespace(ctx context.Context, serviceAccount string) error { return applyPermissionsToServiceAccount(ctx, serviceAccount, "rbac-template.yaml") } @@ -730,3 +793,63 @@ func MarkTestOperatorNotReady(ctx context.Context, state string) error { _, err = k8sClient("exec", podName, "-n", sc.namespace, "--", op, "/var/www/ready") return err } + +// SetCRDFieldMinValue patches a CRD to set the minimum value for a field. +// jsonPath is in the format ".spec.fieldName" and gets converted to the CRD schema path. 
+func SetCRDFieldMinValue(_ context.Context, resourceType, jsonPath string, minValue int) error { + var crdName string + switch resourceType { + case "ClusterExtension": + crdName = "clusterextensions.olm.operatorframework.io" + case "ClusterExtensionRevision": + crdName = "clusterextensionrevisions.olm.operatorframework.io" + default: + return fmt.Errorf("unsupported resource type: %s", resourceType) + } + + // Convert JSON path like ".spec.progressDeadlineMinutes" to CRD schema path + // e.g., ".spec.progressDeadlineMinutes" -> "properties/spec/properties/progressDeadlineMinutes" + parts := strings.Split(strings.TrimPrefix(jsonPath, "."), ".") + schemaParts := make([]string, 0, 2*len(parts)) + for _, part := range parts { + schemaParts = append(schemaParts, "properties", part) + } + patchPath := fmt.Sprintf("/spec/versions/0/schema/openAPIV3Schema/%s/minimum", strings.Join(schemaParts, "/")) + + patch := fmt.Sprintf(`[{"op": "replace", "path": "%s", "value": %d}]`, patchPath, minValue) + _, err := k8sClient("patch", "crd", crdName, "--type=json", "-p", patch) + return err +} + +// templateYaml applies values to the template located in templatePath and returns the result or any errors reading +// the template file +func templateYaml(templatePath string, values map[string]string) (string, error) { + yamlContent, err := os.ReadFile(templatePath) + if err != nil { + return "", fmt.Errorf("failed to read template file '%s': %v", templatePath, err) + } + return templateContent(string(yamlContent), values), nil +} + +// templateContent applies values to content and returns the result +func templateContent(content string, values map[string]string) string { + m := func(k string) string { + if v, found := values[k]; found { + return v + } + return "" + } + + // Replace template variables + return os.Expand(content, m) +} + +// extendMap extends m with the key/values in keyValue, which is expected to be of even size +func extendMap(m map[string]string, keyValue ...string) map[string]string { + if len(keyValue) > 0 { + for i := 0; i < len(keyValue); i += 2 { + m[keyValue[i]] = keyValue[i+1] + } + } + return m +} diff --git a/test/e2e/steps/testdata/rbac-template.yaml b/test/e2e/steps/testdata/rbac-template.yaml index d975d7698..90138b9c6 100644 --- a/test/e2e/steps/testdata/rbac-template.yaml +++ b/test/e2e/steps/testdata/rbac-template.yaml @@ -4,12 +4,6 @@ kind: Namespace metadata: name: ${TEST_NAMESPACE} --- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: ${SERVICEACCOUNT_NAME} - namespace: ${TEST_NAMESPACE} ---- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: @@ -23,7 +17,6 @@ rules: - apiGroups: [olm.operatorframework.io] resources: [clusterextensionrevisions, clusterextensionrevisions/finalizers] verbs: [update, create, list, watch, get, delete, patch] - - apiGroups: [apiextensions.k8s.io] resources: [customresourcedefinitions] verbs: [update, create, list, watch, get, delete, patch] @@ -60,8 +53,6 @@ rules: - apiGroups: ["authentication.k8s.io"] resources: ["tokenreviews"] verbs: [create] - - --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding diff --git a/test/e2e/steps/testdata/serviceaccount-template.yaml b/test/e2e/steps/testdata/serviceaccount-template.yaml new file mode 100644 index 000000000..5d1c46449 --- /dev/null +++ b/test/e2e/steps/testdata/serviceaccount-template.yaml @@ -0,0 +1,11 @@ +--- +apiVersion: v1 +kind: Namespace +metadata: + name: ${TEST_NAMESPACE} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: 
${SERVICEACCOUNT_NAME} + namespace: ${TEST_NAMESPACE} \ No newline at end of file diff --git a/testdata/images/bundles/test-operator/v1.0.3/manifests/bundle.configmap.yaml b/testdata/images/bundles/test-operator/v1.0.3/manifests/bundle.configmap.yaml new file mode 100644 index 000000000..43b73e2c7 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.0.3/manifests/bundle.configmap.yaml @@ -0,0 +1,7 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: wrong-test-configmap +# This ConfigMap deliberately carries an unknown top-level field so it can never be applied, letting tests reach the progress deadline +wrongfield: + name: "test-configmap"
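The unknown top-level field is what keeps this manifest from ever applying: the API server rejects fields that are not part of the ConfigMap schema, so the revision's rollout can never complete. A rough local approximation of that failure class, using strict decoding from sigs.k8s.io/yaml (this is not the controller's actual validation path, just an illustration):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/yaml"
)

const manifest = `
apiVersion: v1
kind: ConfigMap
metadata:
  name: wrong-test-configmap
wrongfield:
  name: test-configmap
`

func main() {
	var cm corev1.ConfigMap
	// Strict decoding fails on fields unknown to the ConfigMap schema, the same
	// class of rejection the API server produces when the revision is applied.
	if err := yaml.UnmarshalStrict([]byte(manifest), &cm); err != nil {
		fmt.Println("rejected:", err) // unknown field "wrongfield"
	}
}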
diff --git a/testdata/images/bundles/test-operator/v1.0.3/manifests/testoperator.clusterserviceversion.yaml b/testdata/images/bundles/test-operator/v1.0.3/manifests/testoperator.clusterserviceversion.yaml new file mode 100644 index 000000000..a33f5a6c6 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.0.3/manifests/testoperator.clusterserviceversion.yaml @@ -0,0 +1,151 @@ +apiVersion: operators.coreos.com/v1alpha1 +kind: ClusterServiceVersion +metadata: + annotations: + alm-examples: |- + [ + { + "apiVersion": "olme2etests.olm.operatorframework.io/v1", + "kind": "OLME2ETests", + "metadata": { + "labels": { + "app.kubernetes.io/managed-by": "kustomize", + "app.kubernetes.io/name": "test" + }, + "name": "test-sample" + }, + "spec": null + } + ] + capabilities: Basic Install + createdAt: "2024-10-24T19:21:40Z" + operators.operatorframework.io/builder: operator-sdk-v1.34.1 + operators.operatorframework.io/project_layout: go.kubebuilder.io/v4 + name: testoperator.v1.0.3 + namespace: placeholder +spec: + apiservicedefinitions: {} + customresourcedefinitions: + owned: + - description: Configures subsections of Alertmanager configuration specific to each namespace + displayName: OLME2ETest + kind: OLME2ETest + name: olme2etests.olm.operatorframework.io + version: v1 + description: OLM E2E Testing Operator with an invalid ConfigMap manifest + displayName: test-operator + icon: + - base64data: "" + mediatype: "" + install: + spec: + deployments: + - label: + app.kubernetes.io/component: controller + app.kubernetes.io/name: test-operator + app.kubernetes.io/version: 1.0.3 + name: test-operator + spec: + replicas: 1 + selector: + matchLabels: + app: olme2etest + template: + metadata: + labels: + app: olme2etest + spec: + terminationGracePeriodSeconds: 0 + volumes: + - name: scripts + configMap: + name: httpd-script + defaultMode: 0755 + containers: + - name: busybox-httpd-container + # The image ref is valid; this bundle never rolls out because of the invalid ConfigMap manifest + image: busybox:1.36 + serviceAccountName: simple-bundle-manager + clusterPermissions: + - rules: + - apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create + - apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create + serviceAccountName: simple-bundle-manager + permissions: + - rules: + - apiGroups: + - "" + resources: + - configmaps + - serviceaccounts + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - networking.k8s.io + resources: + - networkpolicies + verbs: + - get + - list + - create + - update + - delete + - apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: simple-bundle-manager + strategy: deployment + installModes: + - supported: false + type: OwnNamespace + - supported: true + type: SingleNamespace + - supported: false + type: MultiNamespace + - supported: true + type: AllNamespaces + keywords: + - registry + links: + - name: simple-bundle + url: https://simple-bundle.domain + maintainers: + - email: main@simple-bundle.domain + name: Simple Bundle + maturity: beta + provider: + name: Simple Bundle + url: https://simple-bundle.domain + version: 1.0.3 diff --git a/testdata/images/bundles/test-operator/v1.0.3/metadata/annotations.yaml b/testdata/images/bundles/test-operator/v1.0.3/metadata/annotations.yaml new file mode 100644 index 000000000..404f0f4a3 --- /dev/null +++ b/testdata/images/bundles/test-operator/v1.0.3/metadata/annotations.yaml @@ -0,0 +1,10 @@ +annotations: + # Core bundle annotations. + operators.operatorframework.io.bundle.mediatype.v1: registry+v1 + operators.operatorframework.io.bundle.manifests.v1: manifests/ + operators.operatorframework.io.bundle.metadata.v1: metadata/ + operators.operatorframework.io.bundle.package.v1: test + operators.operatorframework.io.bundle.channels.v1: beta + operators.operatorframework.io.metrics.builder: operator-sdk-v1.28.0 + operators.operatorframework.io.metrics.mediatype.v1: metrics+v1 + operators.operatorframework.io.metrics.project_layout: unknown diff --git a/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml index 111c75f42..012afbe83 100644 --- a/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml +++ b/testdata/images/catalogs/test-catalog/v1/configs/catalog.yaml @@ -8,6 +8,7 @@ package: test entries: - name: test-operator.1.0.0 - name: test-operator.1.0.2 + - name: test-operator.1.0.3 --- schema: olm.channel name: beta @@ -50,6 +51,17 @@ properties: packageName: test version: 1.0.2 --- +# Bundle with an invalid ConfigMap to ensure the rollout can never succeed - used to test progress deadline timeouts +schema: olm.bundle +name: test-operator.1.0.3 +package: test +image: docker-registry.operator-controller-e2e.svc.cluster.local:5000/bundles/registry-v1/test-operator:v1.0.3 +properties: + - type: olm.package + value: + packageName: test + version: 1.0.3 +--- schema: olm.bundle name: test-operator.1.2.0 package: test
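With the controller predicate in place, ProgressDeadlineExceeded is terminal: updates to such revisions are filtered out, so recovery requires manual intervention. A sketch of how a consumer might detect that state from a Go client, assuming the repository's module path and the ocv1.TypeProgressing constant; installFailedPermanently is an illustrative helper, not part of the patch:

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	ocv1 "github.com/operator-framework/operator-controller/api/v1"
)

// installFailedPermanently reports whether the extension stopped progressing
// because the progress deadline elapsed; at that point the controller no longer
// retries and the installation needs manual intervention.
func installFailedPermanently(ctx context.Context, c client.Client, name string) (bool, error) {
	ext := &ocv1.ClusterExtension{}
	if err := c.Get(ctx, client.ObjectKey{Name: name}, ext); err != nil {
		return false, err
	}
	cnd := meta.FindStatusCondition(ext.Status.Conditions, ocv1.TypeProgressing)
	if cnd == nil {
		return false, nil
	}
	return cnd.Status == metav1.ConditionFalse && cnd.Reason == ocv1.ReasonProgressDeadlineExceeded, nil
}

func main() {} // wire a real client.Client (e.g. built from ctrl.GetConfig()) to use the helper above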