From 17799a171871fe2dfcd6d2144b6cdb156c01329d Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Mon, 12 Jan 2026 20:33:42 +0530 Subject: [PATCH 01/10] feat: add Stream Processor resource --- cfn-resources/stream-processor/.rpdk-config | 27 + cfn-resources/stream-processor/Makefile | 41 ++ cfn-resources/stream-processor/README.md | 144 +++++ cfn-resources/stream-processor/cmd/main.go | 85 +++ .../cmd/resource/callbacks.go | 255 +++++++++ .../cmd/resource/callbacks_test.go | 481 ++++++++++++++++ .../stream-processor/cmd/resource/config.go | 19 + .../stream-processor/cmd/resource/helpers.go | 211 +++++++ .../cmd/resource/helpers_test.go | 325 +++++++++++ .../stream-processor/cmd/resource/mappings.go | 185 +++++++ .../cmd/resource/mappings_test.go | 375 +++++++++++++ .../stream-processor/cmd/resource/model.go | 37 ++ .../stream-processor/cmd/resource/resource.go | 446 +++++++++++++++ .../cmd/resource/resource_test.go | 513 ++++++++++++++++++ cfn-resources/stream-processor/docs/README.md | 179 ++++++ .../stream-processor/docs/streamsdlq.md | 58 ++ .../stream-processor/docs/streamsoptions.md | 34 ++ .../stream-processor/docs/timeouts.md | 34 ++ .../mongodb-atlas-streamprocessor.json | 147 +++++ .../stream-processor/resource-role.yaml | 38 ++ cfn-resources/stream-processor/template.yml | 27 + cfn-resources/stream-processor/test/README.md | 161 ++++++ .../test/cfn-test-create-inputs.sh | 311 +++++++++++ .../test/cfn-test-delete-inputs.sh | 92 ++++ .../test/contract-testing/cfn-test-create.sh | 16 + .../test/contract-testing/cfn-test-delete.sh | 15 + .../test/inputs_1_create.template.json | 8 + .../test/inputs_1_update.template.json | 8 + .../test/inputs_2_create.template.json | 12 + .../test/inputs_2_update.template.json | 8 + .../test/inputs_3_create.template.json | 16 + .../test/inputs_3_update.template.json | 16 + .../test/inputs_4_create.template.json | 9 + .../test/inputs_4_update.template.json | 9 + .../test/inputs_5_create.template.json | 9 + 
.../test/inputs_5_update.template.json | 9 + .../stream-processor.sample-cfn-request.json | 11 + cfn-resources/util/constants/constants.go | 3 + .../atlas-streams/stream-processor/README.md | 191 +++++++ .../stream-processor-cluster-to-kafka.json | 93 ++++ .../stream-processor-dlq.json | 129 +++++ .../stream-processor-kafka-to-cluster.json | 109 ++++ .../stream-processor/stream-processor.json | 102 ++++ 43 files changed, 4998 insertions(+) create mode 100644 cfn-resources/stream-processor/.rpdk-config create mode 100644 cfn-resources/stream-processor/Makefile create mode 100644 cfn-resources/stream-processor/README.md create mode 100644 cfn-resources/stream-processor/cmd/main.go create mode 100644 cfn-resources/stream-processor/cmd/resource/callbacks.go create mode 100644 cfn-resources/stream-processor/cmd/resource/callbacks_test.go create mode 100644 cfn-resources/stream-processor/cmd/resource/config.go create mode 100644 cfn-resources/stream-processor/cmd/resource/helpers.go create mode 100644 cfn-resources/stream-processor/cmd/resource/helpers_test.go create mode 100644 cfn-resources/stream-processor/cmd/resource/mappings.go create mode 100644 cfn-resources/stream-processor/cmd/resource/mappings_test.go create mode 100644 cfn-resources/stream-processor/cmd/resource/model.go create mode 100644 cfn-resources/stream-processor/cmd/resource/resource.go create mode 100644 cfn-resources/stream-processor/cmd/resource/resource_test.go create mode 100644 cfn-resources/stream-processor/docs/README.md create mode 100644 cfn-resources/stream-processor/docs/streamsdlq.md create mode 100644 cfn-resources/stream-processor/docs/streamsoptions.md create mode 100644 cfn-resources/stream-processor/docs/timeouts.md create mode 100644 cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json create mode 100644 cfn-resources/stream-processor/resource-role.yaml create mode 100644 cfn-resources/stream-processor/template.yml create mode 100644 
cfn-resources/stream-processor/test/README.md create mode 100755 cfn-resources/stream-processor/test/cfn-test-create-inputs.sh create mode 100755 cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh create mode 100755 cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh create mode 100755 cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh create mode 100644 cfn-resources/stream-processor/test/inputs_1_create.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_1_update.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_2_create.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_2_update.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_3_create.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_3_update.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_4_create.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_4_update.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_5_create.template.json create mode 100644 cfn-resources/stream-processor/test/inputs_5_update.template.json create mode 100644 cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json create mode 100644 examples/atlas-streams/stream-processor/README.md create mode 100644 examples/atlas-streams/stream-processor/stream-processor-cluster-to-kafka.json create mode 100644 examples/atlas-streams/stream-processor/stream-processor-dlq.json create mode 100644 examples/atlas-streams/stream-processor/stream-processor-kafka-to-cluster.json create mode 100644 examples/atlas-streams/stream-processor/stream-processor.json diff --git a/cfn-resources/stream-processor/.rpdk-config b/cfn-resources/stream-processor/.rpdk-config new file mode 100644 index 000000000..40acec48b --- /dev/null +++ b/cfn-resources/stream-processor/.rpdk-config @@ -0,0 +1,27 @@ 
+{ + "artifact_type": "RESOURCE", + "typeName": "MongoDB::Atlas::StreamProcessor", + "language": "go", + "runtime": "provided.al2", + "entrypoint": "bootstrap", + "testEntrypoint": "bootstrap", + "settings": { + "version": false, + "subparser_name": null, + "verbose": 0, + "force": false, + "type_name": "MongoDB::Atlas::StreamProcessor", + "artifact_type": "r", + "endpoint_url": null, + "region": null, + "target_schemas": [], + "profile": null, + "import_path": "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor", + "protocolVersion": "2.0.0" + }, + "canarySettings": { + "contract_test_file_names": [ + "inputs_1.json" + ] + } +} diff --git a/cfn-resources/stream-processor/Makefile b/cfn-resources/stream-processor/Makefile new file mode 100644 index 000000000..b4c27f329 --- /dev/null +++ b/cfn-resources/stream-processor/Makefile @@ -0,0 +1,41 @@ +.PHONY: build debug test clean +tags=logging callback metrics scheduler +cgo=0 +goos=linux +goarch=amd64 +CFNREP_GIT_SHA?=$(shell git rev-parse HEAD) +ldXflags=-s -w -X github.com/mongodb/mongodbatlas-cloudformation-resources/util.defaultLogLevel=info -X github.com/mongodb/mongodbatlas-cloudformation-resources/version.Version=${CFNREP_GIT_SHA} +ldXflagsD=-X github.com/mongodb/mongodbatlas-cloudformation-resources/util.defaultLogLevel=debug -X github.com/mongodb/mongodbatlas-cloudformation-resources/version.Version=${CFNREP_GIT_SHA} + +build: + cfn generate + env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflags)" -tags="$(tags)" -o bin/bootstrap cmd/main.go + +debug: + cfn generate + env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflagsD)" -tags="$(tags)" -o bin/debug cmd/main.go + +test: + cfn generate + env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflags)" -tags="$(tags)" -o bin/bootstrap cmd/main.go + +clean: + rm -rf bin + +submit: clean build + @echo "==> Submitting to private registry for testing" + cfn 
submit --set-default --region us-east-1 + +create-test-resources: + @echo "==> Creating test files and resources for contract testing" + ./test/contract-testing/cfn-test-create.sh + +delete-test-resources: + @echo "==> Delete test resources used for contract testing" + ./test/contract-testing/cfn-test-delete.sh + +run-contract-testing: + @echo "==> Run contract testing" + make build + sam local start-lambda & + cfn test --function-name TestEntrypoint --verbose \ No newline at end of file diff --git a/cfn-resources/stream-processor/README.md b/cfn-resources/stream-processor/README.md new file mode 100644 index 000000000..d0899e0f0 --- /dev/null +++ b/cfn-resources/stream-processor/README.md @@ -0,0 +1,144 @@ +# MongoDB::Atlas::StreamProcessor + +## Description + +Resource for creating and managing [Stream Processors for an Atlas Stream Instance](https://www.mongodb.com/docs/api/doc/atlas-admin-api-v2/operation/operation-createstreamprocessor). + +## Requirements + +Set up an AWS profile to securely give CloudFormation access to your Atlas credentials. +For instructions on setting up a profile, [see here](/README.md#mongodb-atlas-api-keys-credential-management). + +## Attributes and Parameters + +See the [resource docs](docs/README.md). Also refer [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials. 
+ +## CloudFormation Examples + +See the example [CFN Templates](/examples/atlas-streams/stream-processor/) for example resources: + +- [Basic Stream Processor](/examples/atlas-streams/stream-processor/stream-processor.json) +- [Stream Processor with DLQ](/examples/atlas-streams/stream-processor/stream-processor-dlq.json) + +## Prerequisites + +Before creating a stream processor, you must have: + +- An existing Atlas Project +- An existing Stream Instance/Workspace (created via `MongoDB::Atlas::StreamInstance` resource) +- At least one Stream Connection configured (created via `MongoDB::Atlas::StreamConnection` resource) + - A source connection (e.g., sample data source, cluster connection, or Kafka connection) + - A sink connection (must be a cluster connection for merge operations) + +## Deployment + +### Deploy Basic Stream Processor + +```bash +aws cloudformation deploy \ + --template-file examples/atlas-streams/stream-processor/stream-processor.json \ + --stack-name stream-processor-stack \ + --parameter-overrides \ + ProjectId= \ + WorkspaceName= \ + ProcessorName=my-processor \ + SourceConnectionName=sample_stream_solar \ + SinkConnectionName= \ + SinkDatabase=test \ + SinkCollection=output \ + State=CREATED \ + --capabilities CAPABILITY_IAM \ + --region us-east-1 +``` + +### Deploy Stream Processor with DLQ + +```bash +aws cloudformation deploy \ + --template-file examples/atlas-streams/stream-processor/stream-processor-dlq.json \ + --stack-name stream-processor-dlq-stack \ + --parameter-overrides \ + ProjectId= \ + WorkspaceName= \ + ProcessorName=my-processor-dlq \ + SourceConnectionName=sample_stream_solar \ + SinkConnectionName= \ + SinkDatabase=test \ + SinkCollection=output \ + DlqConnectionName= \ + DlqDatabase=dlq \ + DlqCollection=dlq-messages \ + State=CREATED \ + --capabilities CAPABILITY_IAM \ + --region us-east-1 +``` + +## Verification + +After deployment, verify the stream processor was created successfully using both Atlas CLI and
Atlas UI. + +### Atlas CLI Verification + +```bash +# List all stream processors for a workspace +atlas streams processors list --projectId + +# Describe a specific stream processor +atlas streams processors describe \ + --instance \ + --projectId +``` + +### Expected CLI Output + +The `atlas streams processors describe` command should return: + +- `id`: Unique identifier of the processor (matches the `Id` attribute in CloudFormation) +- `name`: Processor name (matches `ProcessorName` parameter) +- `state`: Current state (CREATED, STARTED, STOPPED, or FAILED) +- `pipeline`: Array of pipeline stages matching your Pipeline configuration +- `options`: DLQ configuration if provided (should match your Options.Dlq settings) +- `stats`: Processing statistics (available when processor is STARTED) + +### Verify Pipeline Configuration + +The pipeline should match your CloudFormation template: + +- Source connection name should match `SourceConnectionName` parameter +- Merge target connection should match `SinkConnectionName` parameter +- Database and collection should match `SinkDatabase` and `SinkCollection` parameters + +### Verify DLQ Configuration (if applicable) + +For processors with DLQ: + +- `options.dlq.connectionName` should match `DlqConnectionName` parameter +- `options.dlq.db` should match `DlqDatabase` parameter +- `options.dlq.coll` should match `DlqCollection` parameter + +### Atlas UI Verification + +1. Navigate to your Atlas project in the [Atlas UI](https://cloud.mongodb.com) +2. Go to **Stream Processing** section +3. Select your stream workspace/instance +4. 
Verify the processor appears in the **Processors** tab with: + - **Name**: Matches the `ProcessorName` from your CloudFormation template + - **State**: Matches the `State` parameter (CREATED, STARTED, or STOPPED) + - **Pipeline**: Click on the processor to view pipeline stages and verify: + - Source connection matches your `SourceConnectionName` parameter + - Merge target connection matches your `SinkConnectionName` parameter + - Target database and collection match your `SinkDatabase` and `SinkCollection` parameters +5. For processors with DLQ: + - Verify DLQ configuration is displayed in the processor details + - Check that DLQ connection, database, and collection match your parameters +6. If processor is in STARTED state: + - Verify processing statistics are available + - Check that messages are being processed (stats show input/output message counts) + +## Notes + +- **AWS Only**: This CloudFormation resource is designed for AWS deployments. The provider is effectively AWS. +- **WorkspaceName vs InstanceName**: Use `WorkspaceName` (preferred). `InstanceName` is supported for backward compatibility but is deprecated. +- **State Management**: When creating a processor, specify `State: STARTED` to automatically start processing, or `State: CREATED` to create it in a stopped state. +- **Long-Running Operations**: Creating and starting stream processors can take several minutes. The resource uses callback-based state management to handle these operations asynchronously. +- **Timeout Configuration**: Use `Timeouts.Create` to configure how long to wait for processor creation/startup (default: 20 minutes). diff --git a/cfn-resources/stream-processor/cmd/main.go b/cfn-resources/stream-processor/cmd/main.go new file mode 100644 index 000000000..e34cceb2e --- /dev/null +++ b/cfn-resources/stream-processor/cmd/main.go @@ -0,0 +1,85 @@ +// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT. 
+package main + +import ( + "errors" + "fmt" + "log" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn" + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" +) + +// Handler is a container for the CRUDL actions exported by resources +type Handler struct{} + +// Create wraps the related Create function exposed by the resource code +func (r *Handler) Create(req handler.Request) handler.ProgressEvent { + return wrap(req, resource.Create) +} + +// Read wraps the related Read function exposed by the resource code +func (r *Handler) Read(req handler.Request) handler.ProgressEvent { + return wrap(req, resource.Read) +} + +// Update wraps the related Update function exposed by the resource code +func (r *Handler) Update(req handler.Request) handler.ProgressEvent { + return wrap(req, resource.Update) +} + +// Delete wraps the related Delete function exposed by the resource code +func (r *Handler) Delete(req handler.Request) handler.ProgressEvent { + return wrap(req, resource.Delete) +} + +// List wraps the related List function exposed by the resource code +func (r *Handler) List(req handler.Request) handler.ProgressEvent { + return wrap(req, resource.List) +} + +// main is the entry point of the application. 
+func main() { + cfn.Start(&Handler{}) +} + +type handlerFunc func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) + +func wrap(req handler.Request, f handlerFunc) (response handler.ProgressEvent) { + defer func() { + // Catch any panics and return a failed ProgressEvent + if r := recover(); r != nil { + err, ok := r.(error) + if !ok { + err = errors.New(fmt.Sprint(r)) + } + + log.Printf("Trapped error in handler: %v", err) + + response = handler.NewFailedEvent(err) + } + }() + + // Populate the previous model + prevModel := &resource.Model{} + if err := req.UnmarshalPrevious(prevModel); err != nil { + log.Printf("Error unmarshaling prev model: %v", err) + return handler.NewFailedEvent(err) + } + + // Populate the current model + currentModel := &resource.Model{} + if err := req.Unmarshal(currentModel); err != nil { + log.Printf("Error unmarshaling model: %v", err) + return handler.NewFailedEvent(err) + } + + response, err := f(req, prevModel, currentModel) + if err != nil { + log.Printf("Error returned from handler function: %v", err) + return handler.NewFailedEvent(err) + } + + return response +} diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go b/cfn-resources/stream-processor/cmd/resource/callbacks.go new file mode 100644 index 000000000..44c255c15 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -0,0 +1,255 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resource + +import ( + "context" + "fmt" + "maps" + + "go.mongodb.org/atlas-sdk/v20250312010/admin" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/logger" +) + +type CallbackData struct { + ProjectID string + WorkspaceOrInstanceName string + ProcessorName string + DesiredState string + StartTime string + TimeoutDuration string + NeedsStarting bool + DeleteOnCreateTimeout bool +} + +func IsCallback(req *handler.Request) bool { + _, found := req.CallbackContext["callbackStreamProcessor"] + return found +} + +func GetCallbackData(req handler.Request) *CallbackData { + ctx := &CallbackData{} + + if val, ok := req.CallbackContext["projectID"].(string); ok { + ctx.ProjectID = val + } + if val, ok := req.CallbackContext["workspaceName"].(string); ok { + ctx.WorkspaceOrInstanceName = val + } + if val, ok := req.CallbackContext["processorName"].(string); ok { + ctx.ProcessorName = val + } + if val, ok := req.CallbackContext["needsStarting"].(bool); ok { + ctx.NeedsStarting = val + } + if val, ok := req.CallbackContext["desiredState"].(string); ok { + ctx.DesiredState = val + } + if val, ok := req.CallbackContext["startTime"].(string); ok { + ctx.StartTime = val + } + if val, ok := req.CallbackContext["timeoutDuration"].(string); ok { + ctx.TimeoutDuration = val + } + if val, ok := req.CallbackContext["deleteOnCreateTimeout"].(bool); ok { + ctx.DeleteOnCreateTimeout = val + } + + return ctx +} + +func ValidateCallbackData(ctx *CallbackData) *handler.ProgressEvent { + if ctx.ProjectID == "" || ctx.WorkspaceOrInstanceName == "" || ctx.ProcessorName == "" { + return &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Missing required values in callback context", + } + } + 
return nil +} + +func BuildCallbackContext(projectID, workspaceOrInstanceName, processorName string, additionalFields map[string]any) map[string]any { + ctx := map[string]any{ + "callbackStreamProcessor": true, + "projectID": projectID, + "workspaceName": workspaceOrInstanceName, + "processorName": processorName, + } + + maps.Copy(ctx, additionalFields) + + return ctx +} + +func cleanupOnCreateTimeout(ctx context.Context, atlasClient *admin.APIClient, callbackCtx *CallbackData) error { + if !callbackCtx.DeleteOnCreateTimeout { + return nil + } + + _, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName).Execute() + if err != nil { + _, _ = logger.Warnf("Cleanup delete failed: %v", err) + } + return nil +} + +func HandleCreateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) { + needsStarting := callbackCtx.NeedsStarting + + if IsTimeoutExceeded(callbackCtx.StartTime, callbackCtx.TimeoutDuration) { + if err := cleanupOnCreateTimeout(context.Background(), atlasClient, callbackCtx); err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Timeout reached and cleanup failed: %s", err.Error()), + }, nil + } + cleanupMsg := "Timeout reached when waiting for stream processor creation" + if callbackCtx.DeleteOnCreateTimeout { + cleanupMsg += ". Resource has been deleted because delete_on_create_timeout is true. If you suspect a transient error, wait before retrying to allow resource deletion to finish." + } else { + cleanupMsg += ". Cleanup was not performed because delete_on_create_timeout is false." 
+ } + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: cleanupMsg, + }, nil + } + + streamProcessor, peErr := getStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + if peErr != nil { + return *peErr, nil + } + + currentState := streamProcessor.GetState() + + callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + "needsStarting": callbackCtx.NeedsStarting, + "startTime": callbackCtx.StartTime, + "timeoutDuration": callbackCtx.TimeoutDuration, + "deleteOnCreateTimeout": callbackCtx.DeleteOnCreateTimeout, + }) + + switch currentState { + case CreatedState: + if needsStarting { + if peErr := startStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + return *peErr, nil + } + return createInProgressEvent("Starting stream processor", currentModel, callbackContext), nil + } + return FinalizeModel(streamProcessor, currentModel, "Create Completed") + + case StartedState: + return FinalizeModel(streamProcessor, currentModel, "Create Completed") + + case InitiatingState, CreatingState: + return createInProgressEvent(fmt.Sprintf("Creating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil + + case FailedState: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Stream processor entered FAILED state", + }, nil + + default: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Unexpected state during creation: %s", currentState), + }, nil + } +} + +func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) { + streamProcessor, peErr := getStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, 
callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + if peErr != nil { + return *peErr, nil + } + + desiredState := callbackCtx.DesiredState + if desiredState == "" { + desiredState = streamProcessor.GetState() + if desiredState == "" { + if currentModel != nil && currentModel.DesiredState != nil && *currentModel.DesiredState != "" { + desiredState = *currentModel.DesiredState + } else { + desiredState = CreatedState + } + } + } + + currentState := streamProcessor.GetState() + + callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + "desiredState": desiredState, + }) + + switch currentState { + case StoppedState, CreatedState: + modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating update request: %s", err.Error()), + }, nil + } + + streamProcessorResp, apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + if err != nil { + return HandleError(apiResp, constants.UPDATE, err) + } + + if desiredState == StartedState { + if peErr := startStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + return *peErr, nil + } + return createInProgressEvent("Starting stream processor", currentModel, callbackContext), nil + } + + return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") + + case StartedState: + if desiredState == StartedState { + return FinalizeModel(streamProcessor, currentModel, "Update Completed") + } + + _, err := atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, + &admin.StopStreamProcessorApiParams{ + GroupId: callbackCtx.ProjectID, + TenantName: callbackCtx.WorkspaceOrInstanceName, + ProcessorName: callbackCtx.ProcessorName, + }, + ).Execute() + if err 
!= nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), + }, nil + } + return createInProgressEvent("Stopping stream processor", currentModel, callbackContext), nil + + case FailedState: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Stream processor entered FAILED state", + }, nil + + default: + return createInProgressEvent(fmt.Sprintf("Updating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil + } +} diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks_test.go b/cfn-resources/stream-processor/cmd/resource/callbacks_test.go new file mode 100644 index 000000000..7958b181e --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/callbacks_test.go @@ -0,0 +1,481 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource_test + +import ( + "context" + "net/http" + "testing" + "time" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" +) + +var ( + baseModel = &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + } + baseCallbackCtx = &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + } +) + +func TestIsCallback(t *testing.T) { + testCases := map[string]struct { + req handler.Request + expectedResult bool + }{ + "isCallback": { + req: handler.Request{ + CallbackContext: map[string]any{"callbackStreamProcessor": true}, + }, + expectedResult: true, + }, + "notCallback": { + req: handler.Request{ + CallbackContext: map[string]any{}, + }, + expectedResult: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.expectedResult, resource.IsCallback(&tc.req)) + }) + } +} + +func TestGetCallbackData(t *testing.T) { + testCases := map[string]struct { + expectedResult *resource.CallbackData + req handler.Request + }{ + "allFieldsPresent": { + req: handler.Request{ + CallbackContext: map[string]any{ + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "needsStarting": true, + "desiredState": "STARTED", + "startTime": "2024-01-01T00:00:00Z", + "timeoutDuration": "20m", + "deleteOnCreateTimeout": true, + }, + }, + expectedResult: 
&resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: true, + DesiredState: "STARTED", + StartTime: "2024-01-01T00:00:00Z", + TimeoutDuration: "20m", + DeleteOnCreateTimeout: true, + }, + }, + "partialFields": { + req: handler.Request{ + CallbackContext: map[string]any{ + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + }, + }, + expectedResult: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + }, + "emptyContext": { + req: handler.Request{ + CallbackContext: map[string]any{}, + }, + expectedResult: &resource.CallbackData{}, + }, + "typeAssertionFailures": { + req: handler.Request{ + CallbackContext: map[string]any{ + "projectID": 123, + "workspaceName": true, + "processorName": []string{"invalid"}, + "needsStarting": "not a bool", + "desiredState": 456, + "startTime": struct{}{}, + "timeoutDuration": nil, + "deleteOnCreateTimeout": "not a bool", + }, + }, + expectedResult: &resource.CallbackData{}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + assert.Equal(t, tc.expectedResult, resource.GetCallbackData(tc.req)) + }) + } +} + +func TestValidateCallbackData(t *testing.T) { + validCtx := &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + } + + testCases := map[string]struct { + callbackCtx *resource.CallbackData + expectedMsgContain string + expectedError bool + }{ + "valid": { + callbackCtx: validCtx, + expectedError: false, + }, + "missingProjectID": { + callbackCtx: &resource.CallbackData{WorkspaceOrInstanceName: "workspace-1", ProcessorName: "processor-1"}, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + "missingWorkspaceName": { + callbackCtx: 
&resource.CallbackData{ProjectID: "507f1f77bcf86cd799439011", ProcessorName: "processor-1"}, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + "missingProcessorName": { + callbackCtx: &resource.CallbackData{ProjectID: "507f1f77bcf86cd799439011", WorkspaceOrInstanceName: "workspace-1"}, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + peErr := resource.ValidateCallbackData(tc.callbackCtx) + if tc.expectedError { + require.NotNil(t, peErr) + assert.Contains(t, peErr.Message, tc.expectedMsgContain) + } else { + require.Nil(t, peErr) + } + }) + } +} + +func TestBuildCallbackContext(t *testing.T) { + testCases := map[string]struct { + additionalFields map[string]any + validateFunc func(t *testing.T, ctx map[string]any) + }{ + "basic": { + additionalFields: map[string]any{}, + validateFunc: func(t *testing.T, ctx map[string]any) { + t.Helper() + assert.True(t, ctx["callbackStreamProcessor"].(bool)) + assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) + assert.Equal(t, "workspace-1", ctx["workspaceName"]) + assert.Equal(t, "processor-1", ctx["processorName"]) + }, + }, + "withAdditionalFields": { + additionalFields: map[string]any{"needsStarting": true, "desiredState": "STARTED"}, + validateFunc: func(t *testing.T, ctx map[string]any) { + t.Helper() + assert.True(t, ctx["callbackStreamProcessor"].(bool)) + assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) + assert.True(t, ctx["needsStarting"].(bool)) + assert.Equal(t, "STARTED", ctx["desiredState"]) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ctx := resource.BuildCallbackContext("507f1f77bcf86cd799439011", "workspace-1", "processor-1", tc.additionalFields) + if tc.validateFunc != nil { + tc.validateFunc(t, ctx) + } + }) + } +} + +func TestHandleCreateCallback(t *testing.T) { + mockClient := &admin.APIClient{StreamsApi: 
mockadmin.NewStreamsApi(t)} + ctx := context.Background() + + timeoutCtx := func(deleteOnTimeout bool) *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.StartTime = time.Now().Add(-25 * time.Minute).Format(time.RFC3339) + ctx.DeleteOnCreateTimeout = deleteOnTimeout + return &ctx + } + + createMockProcessor := func(state string) *admin.StreamsProcessorWithStats { + return &admin.StreamsProcessorWithStats{Name: "processor-1", State: state} + } + + setupGetProcessor := func(m *mockadmin.StreamsApi, processor *admin.StreamsProcessorWithStats) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + } + + setupStartProcessor := func(m *mockadmin.StreamsApi) { + startReq := admin.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + } + + testCases := map[string]struct { + currentModel *resource.Model + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + expectedMsg string + }{ + "timeoutExceededWithCleanup": { + currentModel: baseModel, + callbackCtx: timeoutCtx(true), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.Failed, + expectedMsg: "Timeout reached", + }, + "timeoutExceededWithoutCleanup": { + currentModel: baseModel, + callbackCtx: timeoutCtx(false), + expectedStatus: handler.Failed, + expectedMsg: "Timeout reached", + }, + "createdStateNeedsStarting": { + currentModel: baseModel, + 
callbackCtx: func() *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.NeedsStarting = true + return &ctx + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.CreatedState)) + setupStartProcessor(m) + }, + expectedStatus: handler.InProgress, + expectedMsg: "Starting stream processor", + }, + "createdStateNoStarting": { + currentModel: baseModel, + callbackCtx: func() *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.NeedsStarting = false + return &ctx + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.CreatedState)) + }, + expectedStatus: handler.Success, + expectedMsg: "Create Completed", + }, + "startedState": { + currentModel: baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.StartedState)) + }, + expectedStatus: handler.Success, + expectedMsg: "Create Completed", + }, + "creatingState": { + currentModel: baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.CreatingState)) + }, + expectedStatus: handler.InProgress, + expectedMsg: "Creating stream processor", + }, + "failedState": { + currentModel: baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.FailedState)) + }, + expectedStatus: handler.Failed, + expectedMsg: "FAILED state", + }, + "unexpectedState": { + currentModel: baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, &admin.StreamsProcessorWithStats{Name: "processor-1", State: "UNKNOWN"}) + }, + expectedStatus: handler.Failed, + expectedMsg: "Unexpected state", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + if tc.mockSetup != nil { + 
tc.mockSetup(mockStreamsAPI) + } + mockClient.StreamsApi = mockStreamsAPI + + event, err := resource.HandleCreateCallback(ctx, mockClient, tc.currentModel, tc.callbackCtx) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +func TestHandleUpdateCallback(t *testing.T) { + mockClient := &admin.APIClient{StreamsApi: mockadmin.NewStreamsApi(t)} + ctx := context.Background() + + createMockProcessor := func(state string) *admin.StreamsProcessorWithStats { + return &admin.StreamsProcessorWithStats{Name: "processor-1", State: state} + } + + setupGetProcessor := func(m *mockadmin.StreamsApi, processor *admin.StreamsProcessorWithStats) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + } + + setupUpdateProcessor := func(m *mockadmin.StreamsApi, updatedState string) { + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(createMockProcessor(updatedState), &http.Response{StatusCode: 200}, nil) + } + + setupStartProcessor := func(m *mockadmin.StreamsApi) { + startReq := admin.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + } + + setupStopProcessor := func(m *mockadmin.StreamsApi) { + stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) + m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, nil) + } + + testCases := map[string]struct { + 
currentModel *resource.Model + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + expectedMsg string + }{ + "stoppedStateWithDesiredStarted": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: func() *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.DesiredState = resource.StartedState + return &ctx + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.StoppedState)) + setupUpdateProcessor(m, resource.StoppedState) + setupStartProcessor(m) + }, + expectedStatus: handler.InProgress, + expectedMsg: "Starting stream processor", + }, + "startedStateWithDesiredStarted": { + currentModel: baseModel, + callbackCtx: func() *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.DesiredState = resource.StartedState + return &ctx + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.StartedState)) + }, + expectedStatus: handler.Success, + expectedMsg: "Update Completed", + }, + "startedStateWithDesiredStopped": { + currentModel: baseModel, + callbackCtx: func() *resource.CallbackData { + ctx := *baseCallbackCtx + ctx.DesiredState = resource.StoppedState + return &ctx + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.StartedState)) + setupStopProcessor(m) + }, + expectedStatus: handler.InProgress, + expectedMsg: "Stopping stream processor", + }, + "failedState": { + currentModel: baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, createMockProcessor(resource.FailedState)) + }, + expectedStatus: handler.Failed, + expectedMsg: "FAILED state", + }, + "defaultState": { + currentModel: 
baseModel, + callbackCtx: baseCallbackCtx, + mockSetup: func(m *mockadmin.StreamsApi) { + setupGetProcessor(m, &admin.StreamsProcessorWithStats{Name: "processor-1", State: "UNKNOWN"}) + }, + expectedStatus: handler.InProgress, + expectedMsg: "Updating stream processor", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + if tc.mockSetup != nil { + tc.mockSetup(mockStreamsAPI) + } + mockClient.StreamsApi = mockStreamsAPI + + event, err := resource.HandleUpdateCallback(ctx, mockClient, tc.currentModel, tc.callbackCtx) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} diff --git a/cfn-resources/stream-processor/cmd/resource/config.go b/cfn-resources/stream-processor/cmd/resource/config.go new file mode 100644 index 000000000..4d9eb7831 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/config.go @@ -0,0 +1,19 @@ +// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT. +// Updates to this type are made my editing the schema file and executing the 'generate' command. +package resource + +import "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + +// TypeConfiguration is autogenerated from the json schema +type TypeConfiguration struct { +} + +// Configuration returns a resource's configuration. 
+func Configuration(req handler.Request) (*TypeConfiguration, error) { + // Populate the type configuration + typeConfig := &TypeConfiguration{} + if err := req.UnmarshalTypeConfig(typeConfig); err != nil { + return typeConfig, err + } + return typeConfig, nil +} diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go new file mode 100644 index 000000000..1bb7668aa --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -0,0 +1,211 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.mongodb.org/atlas-sdk/v20250312010/admin" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/logger" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/progressevent" +) + +func CopyIdentifyingFields(resourceModel, currentModel *Model) { + resourceModel.Profile = currentModel.Profile + resourceModel.ProjectId = currentModel.ProjectId + resourceModel.ProcessorName = currentModel.ProcessorName + + switch { + case currentModel.WorkspaceName != nil && *currentModel.WorkspaceName != "": + resourceModel.WorkspaceName = currentModel.WorkspaceName + resourceModel.InstanceName = util.Pointer(*currentModel.WorkspaceName) + case currentModel.InstanceName != nil && *currentModel.InstanceName != "": + resourceModel.InstanceName = currentModel.InstanceName + resourceModel.WorkspaceName = util.Pointer(*currentModel.InstanceName) + default: + resourceModel.WorkspaceName = currentModel.WorkspaceName + resourceModel.InstanceName = currentModel.InstanceName + } +} + +func ParseTimeout(timeoutStr string) time.Duration { + if timeoutStr == "" { + return DefaultCreateTimeout + } + duration, err := time.ParseDuration(timeoutStr) + if err != nil { + _, _ = logger.Warnf("Invalid timeout format '%s', using default: %v", timeoutStr, err) + return DefaultCreateTimeout + } + return duration +} + +func IsTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool { + if startTimeStr == "" || timeoutDurationStr == "" { + return false + } + + startTime, err := time.Parse(time.RFC3339, startTimeStr) + if err != nil { + _, _ = logger.Warnf("Invalid start time format '%s': %v", startTimeStr, err) + return false + } + + timeoutDuration := ParseTimeout(timeoutDurationStr) + 
elapsed := time.Since(startTime) + + return elapsed >= timeoutDuration +} + +func FinalizeModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model, message string) (handler.ProgressEvent, error) { + resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + }, nil + } + + CopyIdentifyingFields(resourceModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: message, + ResourceModel: resourceModel, + }, nil +} + +func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsProcessorWithStats, *http.Response, error) { + pageNum := 1 + accumulatedProcessors := make([]admin.StreamsProcessorWithStats, 0) + + for allRecordsRetrieved := false; !allRecordsRetrieved; { + processorsResp, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorsWithParams(ctx, &admin.GetStreamProcessorsApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ItemsPerPage: util.Pointer(constants.DefaultListItemsPerPage), + PageNum: util.Pointer(pageNum), + }).Execute() + + if err != nil { + return nil, apiResp, err + } + + results := processorsResp.GetResults() + accumulatedProcessors = append(accumulatedProcessors, results...) 
+ + totalCount := processorsResp.GetTotalCount() + allRecordsRetrieved = totalCount <= len(accumulatedProcessors) || len(results) < constants.DefaultListItemsPerPage + pageNum++ + } + + return accumulatedProcessors, nil, nil +} + +func getStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) (*admin.StreamsProcessorWithStats, *handler.ProgressEvent) { + requestParams := &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + } + + streamProcessor, resp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Stream processor not found", + HandlerErrorCode: "NotFound", + } + } + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error getting stream processor: %s", err.Error()), + } + } + return streamProcessor, nil +} + +func startStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) *handler.ProgressEvent { + _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, + &admin.StartStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), + } + } + return nil +} + +func createInProgressEvent(message string, currentModel *Model, callbackContext map[string]any) handler.ProgressEvent { + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, 
currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: message, + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: callbackContext, + } +} + +func ValidateUpdateStateTransition(currentState, desiredState string) (errMsg string, isValidTransition bool) { + if currentState == desiredState { + return "", true + } + + if desiredState == StoppedState && currentState != StartedState { + return fmt.Sprintf("Stream Processor must be in %s state to transition to %s state", StartedState, StoppedState), false + } + + if desiredState == CreatedState { + return fmt.Sprintf("Stream Processor cannot transition from %s to CREATED", currentState), false + } + + return "", true +} + +func HandleError(response *http.Response, method constants.CfnFunctions, err error) (handler.ProgressEvent, error) { + errMsg := fmt.Sprintf("%s error:%s", method, err.Error()) + + if response != nil && response.StatusCode == http.StatusConflict { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: errMsg, + HandlerErrorCode: "AlreadyExists", + }, nil + } + + return progressevent.GetFailedEventByResponse(errMsg, response), nil +} diff --git a/cfn-resources/stream-processor/cmd/resource/helpers_test.go b/cfn-resources/stream-processor/cmd/resource/helpers_test.go new file mode 100644 index 000000000..77fb3a6a5 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/helpers_test.go @@ -0,0 +1,325 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource_test + +import ( + "errors" + "net/http" + "testing" + "time" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20250312010/admin" +) + +func TestCopyIdentifyingFields(t *testing.T) { + testCases := map[string]struct { + resourceModel *resource.Model + currentModel *resource.Model + validateFunc func(t *testing.T, resourceModel *resource.Model) + }{ + "withWorkspaceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Equal(t, "default", util.SafeString(rm.Profile)) + assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) + assert.Equal(t, "processor-1", util.SafeString(rm.ProcessorName)) + assert.Equal(t, "workspace-1", util.SafeString(rm.WorkspaceName)) + assert.Equal(t, "workspace-1", util.SafeString(rm.InstanceName)) + }, + }, + "withInstanceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: 
util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + InstanceName: util.StringPtr("instance-1"), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Equal(t, "default", util.SafeString(rm.Profile)) + assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) + assert.Equal(t, "processor-1", util.SafeString(rm.ProcessorName)) + assert.Equal(t, "instance-1", util.SafeString(rm.InstanceName)) + assert.Equal(t, "instance-1", util.SafeString(rm.WorkspaceName)) + }, + }, + "emptyWorkspaceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr(""), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Nil(t, rm.WorkspaceName) + }, + }, + "bothNil": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: nil, + InstanceName: nil, + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Nil(t, rm.WorkspaceName) + assert.Nil(t, rm.InstanceName) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + resource.CopyIdentifyingFields(tc.resourceModel, tc.currentModel) + if tc.validateFunc != nil { + tc.validateFunc(t, tc.resourceModel) + } + }) + } +} + +func TestParseTimeout(t *testing.T) { + testCases := map[string]struct { + timeoutStr string + expectedResult time.Duration + }{ + "validDuration": { + timeoutStr: "20m", + expectedResult: 20 * time.Minute, + }, + "validSeconds": { + timeoutStr: "30s", + expectedResult: 30 * time.Second, + }, + "emptyString": { + timeoutStr: "", + expectedResult: 
resource.DefaultCreateTimeout, + }, + "invalidFormat": { + timeoutStr: "invalid", + expectedResult: resource.DefaultCreateTimeout, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := resource.ParseTimeout(tc.timeoutStr) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestIsTimeoutExceeded(t *testing.T) { + testCases := map[string]struct { + startTimeStr string + timeoutDurationStr string + expectedResult bool + }{ + "timeoutExceeded": { + startTimeStr: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), + timeoutDurationStr: "20m", + expectedResult: true, + }, + "timeoutNotExceeded": { + startTimeStr: time.Now().Add(-10 * time.Minute).Format(time.RFC3339), + timeoutDurationStr: "20m", + expectedResult: false, + }, + "emptyStartTime": { + startTimeStr: "", + timeoutDurationStr: "20m", + expectedResult: false, + }, + "emptyTimeoutDuration": { + startTimeStr: time.Now().Format(time.RFC3339), + timeoutDurationStr: "", + expectedResult: false, + }, + "invalidStartTime": { + startTimeStr: "invalid", + timeoutDurationStr: "20m", + expectedResult: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := resource.IsTimeoutExceeded(tc.startTimeStr, tc.timeoutDurationStr) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestValidateUpdateStateTransition(t *testing.T) { + testCases := map[string]struct { + currentState string + desiredState string + expectedErrMsg string + expectedIsValid bool + }{ + "validCREATEDtoSTARTED": { + currentState: resource.CreatedState, + desiredState: resource.StartedState, + expectedIsValid: true, + }, + "invalidSTARTEDtoCREATED": { + currentState: resource.StartedState, + desiredState: resource.CreatedState, + expectedIsValid: false, + expectedErrMsg: "cannot transition from STARTED to CREATED", + }, + "validSTARTEDtoSTOPPED": { + currentState: resource.StartedState, + desiredState: resource.StoppedState, + expectedIsValid: 
true, + }, + "invalidCREATEDtoSTOPPED": { + currentState: resource.CreatedState, + desiredState: resource.StoppedState, + expectedIsValid: false, + expectedErrMsg: "must be in STARTED state", + }, + "sameState": { + currentState: resource.CreatedState, + desiredState: resource.CreatedState, + expectedIsValid: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + errMsg, isValid := resource.ValidateUpdateStateTransition(tc.currentState, tc.desiredState) + assert.Equal(t, tc.expectedIsValid, isValid) + if !tc.expectedIsValid { + assert.Contains(t, errMsg, tc.expectedErrMsg) + } + }) + } +} + +func TestHandleError(t *testing.T) { + testCases := map[string]struct { + response *http.Response + method constants.CfnFunctions + err error + expectedStatus handler.Status + expectedErrCode string + expectedMsg string + }{ + "conflictError": { + response: &http.Response{ + StatusCode: http.StatusConflict, + }, + method: constants.CREATE, + err: errors.New("already exists"), + expectedStatus: handler.Failed, + expectedErrCode: "AlreadyExists", + expectedMsg: "CREATE error:already exists", + }, + "genericError": { + response: &http.Response{ + StatusCode: http.StatusBadRequest, + }, + method: constants.UPDATE, + err: errors.New("bad request"), + expectedStatus: handler.Failed, + expectedMsg: "UPDATE error:bad request", + }, + "nilResponse": { + response: nil, + method: constants.DELETE, + err: errors.New("network error"), + expectedStatus: handler.Failed, + expectedMsg: "DELETE error:network error", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + event, err := resource.HandleError(tc.response, tc.method, tc.err) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + if tc.expectedErrCode != "" { + assert.Equal(t, tc.expectedErrCode, event.HandlerErrorCode) + } + }) + } +} + +func TestFinalizeModel(t *testing.T) { + testCases 
:= map[string]struct { + streamProcessor *admin.StreamsProcessorWithStats + currentModel *resource.Model + validateFunc func(t *testing.T, event handler.ProgressEvent) + message string + expectedStatus handler.Status + expectedMsg string + }{ + "success": { + streamProcessor: &admin.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + }, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + message: "Create Complete", + expectedStatus: handler.Success, + expectedMsg: "Create Complete", + validateFunc: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + model, ok := event.ResourceModel.(*resource.Model) + require.True(t, ok, "ResourceModel should be *resource.Model") + assert.Equal(t, "processor-1", util.SafeString(model.ProcessorName)) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + event, err := resource.FinalizeModel(tc.streamProcessor, tc.currentModel, tc.message) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Equal(t, tc.expectedMsg, event.Message) + if tc.validateFunc != nil { + tc.validateFunc(t, event) + } + }) + } +} diff --git a/cfn-resources/stream-processor/cmd/resource/mappings.go b/cfn-resources/stream-processor/cmd/resource/mappings.go new file mode 100644 index 000000000..6ddf8eaf8 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/mappings.go @@ -0,0 +1,185 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource + +import ( + "encoding/json" + "fmt" + + "go.mongodb.org/atlas-sdk/v20250312010/admin" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" +) + +func GetWorkspaceOrInstanceName(model *Model) (string, error) { + if model.WorkspaceName != nil && *model.WorkspaceName != "" { + return *model.WorkspaceName, nil + } + if model.InstanceName != nil && *model.InstanceName != "" { + return *model.InstanceName, nil + } + return "", fmt.Errorf("either WorkspaceName or InstanceName must be provided") +} + +func ConvertPipelineToSdk(pipeline string) ([]any, error) { + var pipelineSliceOfMaps []any + err := json.Unmarshal([]byte(pipeline), &pipelineSliceOfMaps) + if err != nil { + return nil, fmt.Errorf("failed to unmarshal pipeline: %w", err) + } + return pipelineSliceOfMaps, nil +} + +func ConvertPipelineToString(pipeline []any) (string, error) { + pipelineJSON, err := json.Marshal(pipeline) + if err != nil { + return "", fmt.Errorf("failed to marshal pipeline: %w", err) + } + return string(pipelineJSON), nil +} + +func ConvertStatsToString(stats any) (string, error) { + if stats == nil { + return "", nil + } + statsJSON, err := json.Marshal(stats) + if err != nil { + return "", fmt.Errorf("failed to marshal stats: %w", err) + } + return string(statsJSON), nil +} + +func NewStreamProcessorReq(model *Model) (*admin.StreamsProcessor, error) { + pipeline, err := ConvertPipelineToSdk(util.SafeString(model.Pipeline)) + if err != nil { + return nil, err + } + + streamProcessor := &admin.StreamsProcessor{ + Name: 
model.ProcessorName, + Pipeline: &pipeline, + } + + if model.Options != nil && model.Options.Dlq != nil { + dlq := model.Options.Dlq + if dlq.Coll != nil && *dlq.Coll != "" && + dlq.ConnectionName != nil && *dlq.ConnectionName != "" && + dlq.Db != nil && *dlq.Db != "" { + streamProcessor.Options = &admin.StreamsOptions{ + Dlq: &admin.StreamsDLQ{ + Coll: dlq.Coll, + ConnectionName: dlq.ConnectionName, + Db: dlq.Db, + }, + } + } + } + + return streamProcessor, nil +} + +func NewStreamProcessorUpdateReq(model *Model) (*admin.UpdateStreamProcessorApiParams, error) { + pipeline, err := ConvertPipelineToSdk(util.SafeString(model.Pipeline)) + if err != nil { + return nil, err + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return nil, err + } + + streamProcessorAPIParams := &admin.UpdateStreamProcessorApiParams{ + GroupId: util.SafeString(model.ProjectId), + TenantName: workspaceOrInstanceName, + ProcessorName: util.SafeString(model.ProcessorName), + StreamsModifyStreamProcessor: &admin.StreamsModifyStreamProcessor{ + Name: model.ProcessorName, + Pipeline: &pipeline, + }, + } + + if model.Options != nil && model.Options.Dlq != nil { + dlq := model.Options.Dlq + if dlq.Coll != nil && *dlq.Coll != "" && + dlq.ConnectionName != nil && *dlq.ConnectionName != "" && + dlq.Db != nil && *dlq.Db != "" { + streamProcessorAPIParams.StreamsModifyStreamProcessor.Options = &admin.StreamsModifyStreamProcessorOptions{ + Dlq: &admin.StreamsDLQ{ + Coll: dlq.Coll, + ConnectionName: dlq.ConnectionName, + Db: dlq.Db, + }, + } + } + } + + return streamProcessorAPIParams, nil +} + +func GetStreamProcessorModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model) (*Model, error) { + model := new(Model) + + if currentModel != nil { + *model = *currentModel + model.DeleteOnCreateTimeout = nil + } + + model.ProcessorName = util.Pointer(streamProcessor.Name) + model.Id = util.Pointer(streamProcessor.Id) + model.State = 
util.Pointer(streamProcessor.State) + + if currentModel != nil && currentModel.Pipeline != nil { + model.Pipeline = currentModel.Pipeline + } else if streamProcessor.Pipeline != nil { + pipelineStr, err := ConvertPipelineToString(streamProcessor.GetPipeline()) + if err != nil { + return nil, err + } + model.Pipeline = &pipelineStr + } + + if streamProcessor.Stats != nil { + statsStr, err := ConvertStatsToString(streamProcessor.GetStats()) + if err != nil { + return nil, err + } + model.Stats = &statsStr + } + + if streamProcessor.Options != nil && streamProcessor.Options.Dlq != nil { + apiDlq := streamProcessor.Options.Dlq + if apiDlq.Coll != nil && *apiDlq.Coll != "" && + apiDlq.ConnectionName != nil && *apiDlq.ConnectionName != "" && + apiDlq.Db != nil && *apiDlq.Db != "" { + model.Options = &StreamsOptions{ + Dlq: &StreamsDLQ{ + Coll: apiDlq.Coll, + ConnectionName: apiDlq.ConnectionName, + Db: apiDlq.Db, + }, + } + } + } else if currentModel != nil && currentModel.Options != nil && currentModel.Options.Dlq != nil { + currentDlq := currentModel.Options.Dlq + if currentDlq.Coll != nil && *currentDlq.Coll != "" && + currentDlq.ConnectionName != nil && *currentDlq.ConnectionName != "" && + currentDlq.Db != nil && *currentDlq.Db != "" { + model.Options = currentModel.Options + } + } + + return model, nil +} diff --git a/cfn-resources/stream-processor/cmd/resource/mappings_test.go b/cfn-resources/stream-processor/cmd/resource/mappings_test.go new file mode 100644 index 000000000..e7dc7e745 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/mappings_test.go @@ -0,0 +1,375 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource_test + +import ( + "encoding/json" + "testing" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20250312010/admin" +) + +func assertJSONEqual(t *testing.T, expected, actual string) { + t.Helper() + var expectedJSON, actualJSON any + require.NoError(t, json.Unmarshal([]byte(expected), &expectedJSON)) + require.NoError(t, json.Unmarshal([]byte(actual), &actualJSON)) + assert.Equal(t, expectedJSON, actualJSON) +} + +func TestGetWorkspaceOrInstanceName(t *testing.T) { + testCases := map[string]struct { + model *resource.Model + expectedResult string + expectedError string + }{ + "workspaceName": { + model: &resource.Model{WorkspaceName: util.StringPtr("workspace-1")}, + expectedResult: "workspace-1", + }, + "instanceName": { + model: &resource.Model{InstanceName: util.StringPtr("instance-1")}, + expectedResult: "instance-1", + }, + "neitherSet": { + model: &resource.Model{}, + expectedError: "either WorkspaceName or InstanceName must be provided", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.GetWorkspaceOrInstanceName(tc.model) + if tc.expectedError != "" { + require.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedError) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedResult, result) + } + }) + } +} + +func 
TestConvertPipelineToSdk(t *testing.T) { + testCases := map[string]struct { + pipeline string + expectedError bool + }{ + "validPipeline": { + pipeline: `[{"$match": {"status": "active"}}]`, + }, + "invalidJSON": { + pipeline: `[{"$match": {"status": "active"}`, + expectedError: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.ConvertPipelineToSdk(tc.pipeline) + if tc.expectedError { + require.Error(t, err) + assert.Nil(t, result) + } else { + require.NoError(t, err) + assert.NotNil(t, result) + } + }) + } +} + +func TestConvertPipelineToString(t *testing.T) { + testCases := map[string]struct { + expectedJSON string + pipeline []any + }{ + "validPipeline": { + pipeline: []any{map[string]any{"$match": map[string]any{"status": "active"}}}, + expectedJSON: `[{"$match":{"status":"active"}}]`, + }, + "nilPipeline": { + pipeline: nil, + expectedJSON: `null`, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.ConvertPipelineToString(tc.pipeline) + require.NoError(t, err) + assertJSONEqual(t, tc.expectedJSON, result) + }) + } +} + +func TestConvertStatsToString(t *testing.T) { + result, err := resource.ConvertStatsToString(map[string]any{"bytesProcessed": 1000}) + require.NoError(t, err) + assertJSONEqual(t, `{"bytesProcessed":1000}`, result) + + result, err = resource.ConvertStatsToString(nil) + require.NoError(t, err) + assert.Empty(t, result) +} + +func TestNewStreamProcessorReq(t *testing.T) { + validPipeline := `[{"$match": {"status": "active"}}]` + validDLQ := &resource.StreamsOptions{ + Dlq: &resource.StreamsDLQ{ + Coll: util.StringPtr("dlq-collection"), + ConnectionName: util.StringPtr("dlq-connection"), + Db: util.StringPtr("dlq-db"), + }, + } + + testCases := map[string]struct { + model *resource.Model + expectedError bool + checkOptions bool + }{ + "minimalRequest": { + model: &resource.Model{ + ProcessorName: 
util.StringPtr("test-processor"), + Pipeline: util.StringPtr(validPipeline), + }, + }, + "withOptions": { + model: &resource.Model{ + ProcessorName: util.StringPtr("test-processor"), + Pipeline: util.StringPtr(validPipeline), + Options: validDLQ, + }, + checkOptions: true, + }, + "invalidPipeline": { + model: &resource.Model{ + ProcessorName: util.StringPtr("test-processor"), + Pipeline: util.StringPtr(`invalid json`), + }, + expectedError: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.NewStreamProcessorReq(tc.model) + if tc.expectedError { + require.Error(t, err) + assert.Nil(t, result) + } else { + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, "test-processor", result.GetName()) + if tc.checkOptions { + require.NotNil(t, result.Options.Dlq) + assert.Equal(t, "dlq-collection", util.SafeString(result.Options.Dlq.Coll)) + } + } + }) + } +} + +func TestNewStreamProcessorUpdateReq(t *testing.T) { + validPipeline := `[{"$match": {"status": "active"}}]` + validDLQ := &resource.StreamsOptions{ + Dlq: &resource.StreamsDLQ{ + Coll: util.StringPtr("dlq-collection"), + ConnectionName: util.StringPtr("dlq-connection"), + Db: util.StringPtr("dlq-db"), + }, + } + + testCases := map[string]struct { + model *resource.Model + checkTenant string + expectedError bool + checkOptions bool + }{ + "minimalRequest": { + model: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(validPipeline), + }, + checkTenant: "workspace-1", + }, + "withOptions": { + model: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(validPipeline), + Options: validDLQ, + }, + checkTenant: "workspace-1", + checkOptions: true, + 
}, + "invalidPipeline": { + model: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`invalid json`), + }, + expectedError: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.NewStreamProcessorUpdateReq(tc.model) + if tc.expectedError { + require.Error(t, err) + assert.Nil(t, result) + } else { + require.NoError(t, err) + require.NotNil(t, result) + assert.Equal(t, "507f1f77bcf86cd799439011", result.GroupId) + assert.Equal(t, "test-processor", result.ProcessorName) + if tc.checkTenant != "" { + assert.Equal(t, tc.checkTenant, result.TenantName) + } + if tc.checkOptions { + require.NotNil(t, result.StreamsModifyStreamProcessor.Options.Dlq) + assert.Equal(t, "dlq-collection", result.StreamsModifyStreamProcessor.Options.Dlq.GetColl()) + } + } + }) + } +} + +func TestGetStreamProcessorModel(t *testing.T) { + validDLQ := &admin.StreamsDLQ{ + Coll: admin.PtrString("dlq-collection"), + ConnectionName: admin.PtrString("dlq-connection"), + Db: admin.PtrString("dlq-db"), + } + currentModelWithDLQ := &resource.Model{ + Options: &resource.StreamsOptions{ + Dlq: &resource.StreamsDLQ{ + Coll: util.StringPtr("existing-dlq-collection"), + ConnectionName: util.StringPtr("existing-dlq-connection"), + Db: util.StringPtr("existing-dlq-db"), + }, + }, + } + + testCases := map[string]struct { + streamProcessor *admin.StreamsProcessorWithStats + currentModel *resource.Model + checkFields []string + }{ + "minimalConversion": { + streamProcessor: &admin.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "CREATED", + }, + checkFields: []string{"name", "id", "state"}, + }, + "withAllFields": { + streamProcessor: &admin.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "STARTED", + Pipeline: 
[]any{map[string]any{"$match": map[string]any{"status": "active"}}}, + Stats: map[string]any{"bytesProcessed": 5000, "recordsProcessed": 500}, + Options: &admin.StreamsOptions{Dlq: validDLQ}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + }, + checkFields: []string{"all"}, + }, + "preserveCurrentModelOptions": { + streamProcessor: &admin.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "CREATED", + }, + currentModel: currentModelWithDLQ, + checkFields: []string{"preservedOptions"}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result, err := resource.GetStreamProcessorModel(tc.streamProcessor, tc.currentModel) + require.NoError(t, err) + require.NotNil(t, result) + + for _, field := range tc.checkFields { + switch field { + case "name": + assert.Equal(t, "test-processor", util.SafeString(result.ProcessorName)) + case "id": + assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(result.Id)) + case "state": + assert.Equal(t, tc.streamProcessor.State, util.SafeString(result.State)) + case "all": + assert.Equal(t, "test-processor", util.SafeString(result.ProcessorName)) + assert.NotNil(t, result.Pipeline) + assert.NotNil(t, result.Stats) + require.NotNil(t, result.Options.Dlq) + assert.Equal(t, "dlq-collection", util.SafeString(result.Options.Dlq.Coll)) + case "preservedOptions": + require.NotNil(t, result.Options.Dlq) + assert.Equal(t, "existing-dlq-collection", util.SafeString(result.Options.Dlq.Coll)) + } + } + }) + } +} + +func TestRoundTripConversions(t *testing.T) { + t.Run("pipelineRoundTrip", func(t *testing.T) { + originalPipeline := `[{"$match": {"status": "active"}}, {"$group": {"_id": "$category", "count": {"$sum": 1}}}]` + sdkPipeline, err := resource.ConvertPipelineToSdk(originalPipeline) + require.NoError(t, err) + convertedBack, err := 
resource.ConvertPipelineToString(sdkPipeline) + require.NoError(t, err) + + var original, converted any + require.NoError(t, json.Unmarshal([]byte(originalPipeline), &original)) + require.NoError(t, json.Unmarshal([]byte(convertedBack), &converted)) + assert.Equal(t, original, converted) + }) + + t.Run("statsRoundTrip", func(t *testing.T) { + originalStats := map[string]any{ + "bytesProcessed": 1000, + "recordsProcessed": 100, + "nested": map[string]any{"value": 42}, + } + statsString, err := resource.ConvertStatsToString(originalStats) + require.NoError(t, err) + + var parsedStats any + require.NoError(t, json.Unmarshal([]byte(statsString), &parsedStats)) + parsedMap := parsedStats.(map[string]any) + + assert.InDelta(t, float64(1000), parsedMap["bytesProcessed"], 0.01) + assert.InDelta(t, float64(100), parsedMap["recordsProcessed"], 0.01) + nested := parsedMap["nested"].(map[string]any) + assert.InDelta(t, float64(42), nested["value"], 0.01) + }) +} diff --git a/cfn-resources/stream-processor/cmd/resource/model.go b/cfn-resources/stream-processor/cmd/resource/model.go new file mode 100644 index 000000000..282c8e1a8 --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/model.go @@ -0,0 +1,37 @@ +// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT. +// Updates to this type are made my editing the schema file and executing the 'generate' command. 
+package resource + +// Model is autogenerated from the json schema +type Model struct { + Profile *string `json:",omitempty"` + ProjectId *string `json:",omitempty"` + InstanceName *string `json:",omitempty"` + WorkspaceName *string `json:",omitempty"` + ProcessorName *string `json:",omitempty"` + Pipeline *string `json:",omitempty"` + DesiredState *string `json:",omitempty"` + State *string `json:",omitempty"` + Options *StreamsOptions `json:",omitempty"` + Id *string `json:",omitempty"` + Stats *string `json:",omitempty"` + Timeouts *Timeouts `json:",omitempty"` + DeleteOnCreateTimeout *bool `json:",omitempty"` +} + +// StreamsOptions is autogenerated from the json schema +type StreamsOptions struct { + Dlq *StreamsDLQ `json:",omitempty"` +} + +// StreamsDLQ is autogenerated from the json schema +type StreamsDLQ struct { + Coll *string `json:",omitempty"` + ConnectionName *string `json:",omitempty"` + Db *string `json:",omitempty"` +} + +// Timeouts is autogenerated from the json schema +type Timeouts struct { + Create *string `json:",omitempty"` +} diff --git a/cfn-resources/stream-processor/cmd/resource/resource.go b/cfn-resources/stream-processor/cmd/resource/resource.go new file mode 100644 index 000000000..43fe9cdfd --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/resource.go @@ -0,0 +1,446 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.mongodb.org/atlas-sdk/v20250312010/admin" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/validator" +) + +const ( + InitiatingState = "INIT" + CreatingState = "CREATING" + CreatedState = "CREATED" + StartedState = "STARTED" + StoppedState = "STOPPED" + DroppedState = "DROPPED" + FailedState = "FAILED" +) + +const ( + defaultCallbackDelaySeconds = 3 + DefaultCreateTimeout = 20 * time.Minute +) + +func Setup() { + util.SetupLogger("mongodb-atlas-stream-processor") +} + +var CreateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} +var ReadRequiredFields = []string{constants.ProjectID, constants.ProcessorName} +var UpdateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} +var DeleteRequiredFields = []string{constants.ProjectID, constants.ProcessorName} +var ListRequiredFields = []string{constants.ProjectID} + +var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { + Setup() + util.SetDefaultProfileIfNotDefined(¤tModel.Profile) + + if errEvent := validator.ValidateModel(requiredFields, currentModel); errEvent != nil { + return nil, errEvent + } + + client, peErr := util.NewAtlasClient(&req, currentModel.Profile) + if peErr != nil { + return nil, peErr + } + return client.AtlasSDK, nil +} + +// Create handles the Create event from the Cloudformation service. 
+func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, CreateRequiredFields) + if peErr != nil { + return *peErr, nil + } + + if IsCallback(&req) { + callbackCtx := GetCallbackData(req) + if peErr := ValidateCallbackData(callbackCtx); peErr != nil { + return *peErr, nil + } + return HandleCreateCallback( + context.Background(), + atlasClient, + currentModel, + callbackCtx, + ) + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + var needsStarting bool + if currentModel.DesiredState != nil { + state := *currentModel.DesiredState + switch state { + case StartedState: + needsStarting = true + case CreatedState: + needsStarting = false + default: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "When creating a stream processor, the only valid states are CREATED and STARTED", + }, nil + } + } + + streamProcessorReq, err := NewStreamProcessorReq(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating stream processor request: %s", err.Error()), + }, nil + } + + _, apiResp, err := atlasClient.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceOrInstanceName, streamProcessorReq).Execute() + if err != nil { + return HandleError(apiResp, constants.CREATE, err) + } + + timeoutStr := "" + if currentModel.Timeouts != nil && currentModel.Timeouts.Create != nil { + timeoutStr = *currentModel.Timeouts.Create + } + + deleteOnCreateTimeout := true + if currentModel.DeleteOnCreateTimeout != nil { + deleteOnCreateTimeout = 
*currentModel.DeleteOnCreateTimeout + } + + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Creating stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "needsStarting": needsStarting, + "startTime": time.Now().Format(time.RFC3339), + "timeoutDuration": timeoutStr, + "deleteOnCreateTimeout": deleteOnCreateTimeout, + }), + }, nil +} + +// Read handles the Read event from the Cloudformation service. +func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ReadRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + projectID := util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + streamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(context.Background(), + &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + }, nil + } + return HandleError(apiResp, constants.READ, err) + } + + resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel) + if err != nil { + 
return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + }, nil + } + + CopyIdentifyingFields(resourceModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Read Completed", + ResourceModel: resourceModel, + }, nil +} + +// Update handles the Update event from the Cloudformation service. +func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, UpdateRequiredFields) + if peErr != nil { + return *peErr, nil + } + + if IsCallback(&req) { + callbackCtx := GetCallbackData(req) + if peErr := ValidateCallbackData(callbackCtx); peErr != nil { + return *peErr, nil + } + return HandleUpdateCallback( + context.Background(), + atlasClient, + currentModel, + callbackCtx, + ) + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + requestParams := &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + } + + currentStreamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + }, nil + } + return HandleError(apiResp, constants.READ, err) + } + + currentState := currentStreamProcessor.GetState() + + desiredState := currentState + if currentModel.DesiredState != nil && 
*currentModel.DesiredState != "" { + desiredState = *currentModel.DesiredState + } else if prevModel != nil && prevModel.DesiredState != nil && *prevModel.DesiredState != "" { + desiredState = *prevModel.DesiredState + } + + if errMsg, isValid := ValidateUpdateStateTransition(currentState, desiredState); !isValid { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: errMsg, + }, nil + } + + if currentState == StartedState { + _, err := atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, + &admin.StopStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), + }, nil + } + + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Stopping stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + }, nil + } + + modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating update request: %s", err.Error()), + }, nil + } + + streamProcessorResp, apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + if err != nil { + return HandleError(apiResp, constants.UPDATE, err) + } + + if desiredState == StartedState { + _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, + 
&admin.StartStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), + }, nil + } + + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Starting stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + }, nil + } + + return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") +} + +// List handles the List event from the Cloudformation service. 
+func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ListRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + + accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, atlasClient, projectID, workspaceOrInstanceName) + if err != nil { + return HandleError(apiResp, constants.LIST, err) + } + + response := make([]interface{}, 0, len(accumulatedProcessors)) + for i := range accumulatedProcessors { + model, err := GetStreamProcessorModel(&accumulatedProcessors[i], currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + }, nil + } + + CopyIdentifyingFields(model, currentModel) + response = append(response, model) + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "List Completed", + ResourceModels: response, + }, nil +} + +// Delete handles the Delete event from the CloudFormation service. 
+func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, DeleteRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + apiResp, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceOrInstanceName, processorName).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + }, nil + } + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error deleting stream processor: %s", err.Error()), + }, nil + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Delete Completed", + }, nil +} diff --git a/cfn-resources/stream-processor/cmd/resource/resource_test.go b/cfn-resources/stream-processor/cmd/resource/resource_test.go new file mode 100644 index 000000000..1759f294b --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/resource_test.go @@ -0,0 +1,513 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package resource_test + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" +) + +var ( + baseResourceModel = &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + } + validProcessor = &admin.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } +) + +func TestList(t *testing.T) { + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + testCases := map[string]struct { + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + expectedCount int + }{ + "successfulListSinglePage": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + processors := &admin.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin.StreamsProcessorWithStats{ + {Name: "processor-1", Id: "507f1f77bcf86cd799439011", State: resource.CreatedState}, + {Name: "processor-2", Id: 
"507f1f77bcf86cd799439012", State: resource.StartedState}, + }, + TotalCount: util.Pointer(2), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + expectedCount: 2, + }, + "listWithApiError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("server error")) + }, + expectedStatus: handler.Failed, + expectedCount: 0, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.List(handler.Request{RequestContext: handler.RequestContext{}}, nil, tc.currentModel) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + + if tc.expectedStatus == handler.Success { + require.NotNil(t, event.ResourceModels) + assert.Len(t, event.ResourceModels, tc.expectedCount) + } + }) + } +} + +func TestSetup(t *testing.T) { + assert.NotPanics(t, func() { + resource.Setup() + }) +} + +func TestValidationErrors(t *testing.T) { + validationModels := map[string]*resource.Model{ + "missingProjectId": { + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + "missingProcessorName": { + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + 
WorkspaceName: util.StringPtr("workspace-1"), + }, + } + + operations := map[string]func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error){ + "Create": resource.Create, + "Read": resource.Read, + "Update": resource.Update, + "Delete": resource.Delete, + } + + for opName, operation := range operations { + for modelName, model := range validationModels { + t.Run(opName+"_"+modelName, func(t *testing.T) { + event, err := operation(handler.Request{RequestContext: handler.RequestContext{}}, nil, model) + require.NoError(t, err) + assert.Equal(t, handler.Failed, event.OperationStatus) + assert.Contains(t, event.Message, "required") + }) + } + } +} + +func setupMockClient(t *testing.T, mockSetup func(*mockadmin.StreamsApi)) func() { + t.Helper() + originalInitEnv := resource.InitEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + mockSetup(mockStreamsAPI) + + mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + return func() { resource.InitEnvWithLatestClient = originalInitEnv } +} + +func TestCRUDOperations(t *testing.T) { + testCases := map[string]struct { + operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) + prevModel *resource.Model + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + validateResult func(t *testing.T, event handler.ProgressEvent) + expectedStatus handler.Status + req handler.Request + }{ + "Create_invalidState": { + operation: resource.Create, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr("INVALID_STATE") + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "Create_apiError": { + operation: resource.Create, + currentModel: 
baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("API error")) + }, + expectedStatus: handler.Failed, + }, + "Create_withCallback": { + operation: resource.Create, + req: handler.Request{ + CallbackContext: map[string]any{ + "callbackStreamProcessor": true, + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "needsStarting": false, + "startTime": time.Now().Format(time.RFC3339), + "timeoutDuration": "20m", + "deleteOnCreateTimeout": false, + }, + }, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Create_withDesiredStateStarted": { + operation: resource.Create, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.StartedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "Create_withTimeouts": { + operation: resource.Create, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.Timeouts = &resource.Timeouts{Create: util.StringPtr("30m")} + m.DeleteOnCreateTimeout = 
util.Pointer(false) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "Create_invalidPipeline": { + operation: resource.Create, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.Pipeline = util.StringPtr("invalid json") + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "Create_missingWorkspaceAndInstance": { + operation: resource.Create, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "Read_success": { + operation: resource.Read, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Read_notFound": { + operation: resource.Read, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() 
+ assert.Equal(t, "NotFound", event.HandlerErrorCode) + }, + }, + "Read_apiError": { + operation: resource.Read, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) + }, + expectedStatus: handler.Failed, + }, + "Read_missingWorkspaceAndInstance": { + operation: resource.Read, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "Update_withCallback": { + operation: resource.Update, + req: handler.Request{ + CallbackContext: map[string]any{ + "callbackStreamProcessor": true, + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "desiredState": resource.CreatedState, + }, + }, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Update_notFound": { + operation: resource.Update, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + 
m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, "NotFound", event.HandlerErrorCode) + }, + }, + "Update_invalidStateTransition": { + operation: resource.Update, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.CreatedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Failed, + }, + "Update_stopError": { + operation: resource.Update, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.StoppedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) + m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("stop failed")) + }, + expectedStatus: handler.Failed, + }, + "Update_stopSuccess": { + operation: resource.Update, + currentModel: 
func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.StoppedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) + m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "Update_startError": { + operation: resource.Update, + currentModel: func() *resource.Model { + m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.StartedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + startReq := admin.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("start failed")) + }, + expectedStatus: handler.Failed, + }, + "Update_startSuccess": { + operation: resource.Update, + currentModel: func() *resource.Model { 
+ m := *baseResourceModel + m.DesiredState = util.StringPtr(resource.StartedState) + return &m + }(), + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + startReq := admin.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.InProgress, + }, + "Update_successWithoutStarting": { + operation: resource.Update, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Update_apiError": { + operation: resource.Update, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + 
m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) + updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("update failed")) + }, + expectedStatus: handler.Failed, + }, + "Delete_success": { + operation: resource.Delete, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Delete_notFound": { + operation: resource.Delete, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, "NotFound", event.HandlerErrorCode) + }, + }, + "Delete_apiError": { + operation: resource.Delete, + currentModel: baseResourceModel, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("delete failed")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range testCases { + 
t.Run(name, func(t *testing.T) { + cleanup := setupMockClient(t, tc.mockSetup) + defer cleanup() + + event, err := tc.operation(tc.req, tc.prevModel, tc.currentModel) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + if tc.validateResult != nil { + tc.validateResult(t, event) + } + }) + } +} diff --git a/cfn-resources/stream-processor/docs/README.md b/cfn-resources/stream-processor/docs/README.md new file mode 100644 index 000000000..3cae09fda --- /dev/null +++ b/cfn-resources/stream-processor/docs/README.md @@ -0,0 +1,179 @@ +# MongoDB::Atlas::StreamProcessor + +Returns, adds, edits, and removes Atlas Stream Processors. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Type" : "MongoDB::Atlas::StreamProcessor",
+    "Properties" : {
+        "Profile" : String,
+        "ProjectId" : String,
+        "InstanceName" : String,
+        "WorkspaceName" : String,
+        "ProcessorName" : String,
+        "Pipeline" : String,
+        "DesiredState" : String,
+        "Options" : StreamsOptions,
+        "Timeouts" : Timeouts,
+        "DeleteOnCreateTimeout" : Boolean
+    }
+}
+
+ +### YAML + +
+Type: MongoDB::Atlas::StreamProcessor
+Properties:
+    Profile: String
+    ProjectId: String
+    InstanceName: String
+    WorkspaceName: String
+    ProcessorName: String
+    Pipeline: String
+    DesiredState: String
+    Options: StreamsOptions
+    Timeouts: Timeouts
+    DeleteOnCreateTimeout: Boolean
+
+ +## Properties + +#### Profile + +Profile used to provide credentials information, (a secret with the cfn/atlas/profile/{Profile}, is required), if not provided default is used + +_Required_: No + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### ProjectId + +Unique 24-hexadecimal digit string that identifies your project. + +**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups. + +_Required_: Yes + +_Type_: String + +_Minimum Length_: 24 + +_Maximum Length_: 24 + +_Pattern_: ^([a-f0-9]{24})$ + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### InstanceName + +Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided. + +_Required_: No + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### WorkspaceName + +Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided. + +_Required_: No + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### ProcessorName + +Label that identifies the stream processor. 
+ +_Required_: Yes + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### Pipeline + +Stream aggregation pipeline you want to apply to your streaming data. This should be a JSON-encoded array of pipeline stages. Refer to MongoDB Atlas Docs for more information on stream aggregation pipelines. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### DesiredState + +The desired state of the stream processor. Used to start or stop the Stream Processor. Valid values are CREATED, STARTED or STOPPED. When a Stream Processor is created without specifying the desired state, it will default to CREATED state. When a Stream Processor is updated without specifying the desired state, it will default to the Previous state. + +**NOTE** When a Stream Processor is updated without specifying the desired state, it is stopped and then restored to previous state upon update completion. + +_Required_: No + +_Type_: String + +_Allowed Values_: CREATED | STARTED | STOPPED + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Options + +Optional configuration for the stream processor. + +_Required_: No + +_Type_: StreamsOptions + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Timeouts + +Configurable timeouts for stream processor operations. 
+ +_Required_: No + +_Type_: Timeouts + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### DeleteOnCreateTimeout + +Indicates whether to delete the resource being created if a timeout is reached when waiting for completion. When set to `true` and timeout occurs, it triggers the deletion and returns immediately without waiting for deletion to complete. When set to `false`, the timeout will not trigger resource deletion. If you suspect a transient error when the value is `true`, wait before retrying to allow resource deletion to finish. Default is `true`. + +_Required_: No + +_Type_: Boolean + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +## Return Values + +### Fn::GetAtt + +The `Fn::GetAtt` intrinsic function returns a value for a specified attribute of this type. The following are the available attributes and sample return values. + +For more information about using the `Fn::GetAtt` intrinsic function, see [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html). + +#### Id + +Unique 24-hexadecimal character string that identifies the stream processor. + +#### Stats + +The stats associated with the stream processor as a JSON string. Refer to the MongoDB Atlas Docs for more information. + +#### State + +The actual current state of the stream processor as returned by the Atlas API. Commonly occurring states are 'CREATED', 'STARTED', 'STOPPED' and 'FAILED'. This is a read-only property that reflects the real-time state of the processor. 
+ diff --git a/cfn-resources/stream-processor/docs/streamsdlq.md b/cfn-resources/stream-processor/docs/streamsdlq.md new file mode 100644 index 000000000..e99782f57 --- /dev/null +++ b/cfn-resources/stream-processor/docs/streamsdlq.md @@ -0,0 +1,58 @@ +# MongoDB::Atlas::StreamProcessor StreamsDLQ + +Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Coll" : String,
+    "ConnectionName" : String,
+    "Db" : String
+}
+
+ +### YAML + +
+Coll: String
+ConnectionName: String
+Db: String
+
+ +## Properties + +#### Coll + +Name of the collection to use for the DLQ. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ConnectionName + +Name of the connection to write DLQ messages to. Must be an Atlas connection. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Db + +Name of the database to use for the DLQ. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/docs/streamsoptions.md b/cfn-resources/stream-processor/docs/streamsoptions.md new file mode 100644 index 000000000..015dd98f3 --- /dev/null +++ b/cfn-resources/stream-processor/docs/streamsoptions.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamProcessor StreamsOptions + +Optional configuration for the stream processor. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Dlq" : StreamsDLQ
+}
+
+ +### YAML + +
+Dlq: StreamsDLQ
+
+ +## Properties + +#### Dlq + +Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information. + +_Required_: Yes + +_Type_: StreamsDLQ + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/docs/timeouts.md b/cfn-resources/stream-processor/docs/timeouts.md new file mode 100644 index 000000000..08c397cd8 --- /dev/null +++ b/cfn-resources/stream-processor/docs/timeouts.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamProcessor Timeouts + +Configurable timeouts for stream processor operations. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Create" : String
+}
+
+ +### YAML + +
+Create: String
+
+ +## Properties + +#### Create + +Timeout for create operation in Go duration format (e.g., '5m', '10s'). Default is 20 minutes. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json new file mode 100644 index 000000000..6f9778809 --- /dev/null +++ b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json @@ -0,0 +1,147 @@ +{ + "typeName": "MongoDB::Atlas::StreamProcessor", + "description": "Returns, adds, edits, and removes Atlas Stream Processors.", + "sourceUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources.git", + "definitions": { + "StreamsDLQ": { + "type": "object", + "description": "Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information.", + "properties": { + "Coll": { + "type": "string", + "description": "Name of the collection to use for the DLQ." + }, + "ConnectionName": { + "type": "string", + "description": "Name of the connection to write DLQ messages to. Must be an Atlas connection." + }, + "Db": { + "type": "string", + "description": "Name of the database to use for the DLQ." + } + }, + "required": ["Coll", "ConnectionName", "Db"], + "additionalProperties": false + }, + "StreamsOptions": { + "type": "object", + "description": "Optional configuration for the stream processor.", + "properties": { + "Dlq": { + "$ref": "#/definitions/StreamsDLQ" + } + }, + "required": ["Dlq"], + "additionalProperties": false + }, + "Timeouts": { + "type": "object", + "description": "Configurable timeouts for stream processor operations.", + "properties": { + "Create": { + "type": "string", + "description": "Timeout for create operation in Go duration format (e.g., '5m', '10s'). Default is 20 minutes." 
+ } + }, + "additionalProperties": false + } + }, + "properties": { + "Profile": { + "type": "string", + "description": "Profile used to provide credentials information, (a secret with the cfn/atlas/profile/{Profile}, is required), if not provided default is used", + "default": "default" + }, + "ProjectId": { + "type": "string", + "description": "Unique 24-hexadecimal digit string that identifies your project. \n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups.", + "maxLength": 24, + "minLength": 24, + "pattern": "^([a-f0-9]{24})$" + }, + "InstanceName": { + "type": "string", + "description": "Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided." + }, + "WorkspaceName": { + "type": "string", + "description": "Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided." + }, + "ProcessorName": { + "type": "string", + "description": "Label that identifies the stream processor." + }, + "Pipeline": { + "type": "string", + "description": "Stream aggregation pipeline you want to apply to your streaming data. This should be a JSON-encoded array of pipeline stages. Refer to MongoDB Atlas Docs for more information on stream aggregation pipelines." + }, + "DesiredState": { + "type": "string", + "description": "The desired state of the stream processor. Used to start or stop the Stream Processor. Valid values are CREATED, STARTED or STOPPED. When a Stream Processor is created without specifying the desired state, it will default to CREATED state. 
When a Stream Processor is updated without specifying the desired state, it will default to the Previous state.\n\n**NOTE** When a Stream Processor is updated without specifying the desired state, it is stopped and then restored to previous state upon update completion.", + "enum": ["CREATED", "STARTED", "STOPPED"] + }, + "State": { + "type": "string", + "description": "The actual current state of the stream processor as returned by the Atlas API. Commonly occurring states are 'CREATED', 'STARTED', 'STOPPED' and 'FAILED'. This is a read-only property that reflects the real-time state of the processor." + }, + "Options": { + "$ref": "#/definitions/StreamsOptions" + }, + "Id": { + "type": "string", + "description": "Unique 24-hexadecimal character string that identifies the stream processor." + }, + "Stats": { + "type": "string", + "description": "The stats associated with the stream processor as a JSON string. Refer to the MongoDB Atlas Docs for more information." + }, + "Timeouts": { + "$ref": "#/definitions/Timeouts", + "description": "Configurable timeouts for stream processor operations." + }, + "DeleteOnCreateTimeout": { + "type": "boolean", + "description": "Indicates whether to delete the resource being created if a timeout is reached when waiting for completion. When set to `true` and timeout occurs, it triggers the deletion and returns immediately without waiting for deletion to complete. When set to `false`, the timeout will not trigger resource deletion. If you suspect a transient error when the value is `true`, wait before retrying to allow resource deletion to finish. Default is `true`." 
+ } + }, + "additionalProperties": false, + "required": ["ProjectId", "ProcessorName", "Pipeline"], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Stats", + "/properties/State" + ], + "writeOnlyProperties": ["/properties/DeleteOnCreateTimeout"], + "primaryIdentifier": [ + "/properties/ProjectId", + "/properties/InstanceName", + "/properties/WorkspaceName", + "/properties/ProcessorName", + "/properties/Profile" + ], + "createOnlyProperties": [ + "/properties/ProjectId", + "/properties/InstanceName", + "/properties/WorkspaceName", + "/properties/ProcessorName", + "/properties/Profile" + ], + "handlers": { + "create": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "read": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "update": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "delete": { + "permissions": ["secretsmanager:GetSecretValue"] + } + }, + "documentationUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/cfn-resources/stream-processor/README.md", + "tagging": { + "taggable": false + } +} diff --git a/cfn-resources/stream-processor/resource-role.yaml b/cfn-resources/stream-processor/resource-role.yaml new file mode 100644 index 000000000..bc6022d7d --- /dev/null +++ b/cfn-resources/stream-processor/resource-role.yaml @@ -0,0 +1,38 @@ +AWSTemplateFormatVersion: "2010-09-09" +Description: > + This CloudFormation template creates a role assumed by CloudFormation + during CRUDL operations to mutate resources on behalf of the customer. 
+ +Resources: + ExecutionRole: + Type: AWS::IAM::Role + Properties: + MaxSessionDuration: 8400 + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: resources.cloudformation.amazonaws.com + Action: sts:AssumeRole + Condition: + StringEquals: + aws:SourceAccount: + Ref: AWS::AccountId + StringLike: + aws:SourceArn: + Fn::Sub: arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:type/resource/MongoDB-Atlas-StreamProcessor/* + Path: "/" + Policies: + - PolicyName: ResourceTypePolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - "secretsmanager:GetSecretValue" + Resource: "*" +Outputs: + ExecutionRoleArn: + Value: + Fn::GetAtt: ExecutionRole.Arn diff --git a/cfn-resources/stream-processor/template.yml b/cfn-resources/stream-processor/template.yml new file mode 100644 index 000000000..ad114f643 --- /dev/null +++ b/cfn-resources/stream-processor/template.yml @@ -0,0 +1,27 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: AWS SAM template for the MongoDB::Atlas::StreamProcessor resource type + +Globals: + Function: + Timeout: 180 # docker start-up times can be long for SAM CLI + MemorySize: 256 + +Resources: + TypeFunction: + Type: AWS::Serverless::Function + Properties: + Handler: bootstrap + Runtime: provided.al2 + CodeUri: bin/ + + TestEntrypoint: + Type: AWS::Serverless::Function + Properties: + Handler: bootstrap + Runtime: provided.al2 + CodeUri: bin/ + Environment: + Variables: + MODE: Test + diff --git a/cfn-resources/stream-processor/test/README.md b/cfn-resources/stream-processor/test/README.md new file mode 100644 index 000000000..c52bf2154 --- /dev/null +++ b/cfn-resources/stream-processor/test/README.md @@ -0,0 +1,161 @@ +# MongoDB::Atlas::StreamProcessor + +## Impact +The following components use this resource and are potentially impacted by any changes. 
They should also be validated to ensure the changes do not cause a regression. + - Stream Processor L1 CDK constructor + + +## Prerequisites +### Resources needed to run the manual QA +All resources are created as part of `cfn-testing-helper.sh`: + +- Atlas Project +- Atlas Stream Instance/Workspace (LONG-RUNNING operation, can take 10-30+ minutes) +- Cluster (for DLQ connection testing - inputs_3) +- Stream Connection (for DLQ connection testing - inputs_3) + +**IMPORTANT**: Stream Instance/Workspace creation is a LONG-RUNNING operation that can take 10-30+ minutes. The `cfn-test-create-inputs.sh` script will create the workspace and wait for it to be ready before proceeding. + +## Manual QA +Please follow the steps in [TESTING.md](../../../TESTING.md). + + +### Success criteria when testing the resource + +#### 1. Resource Creation Verification + +A Stream Processor should be created in the specified test project for the specified Atlas Stream workspace/instance: + +**Atlas UI Verification:** +- Navigate to Atlas UI → Your Project → Stream Processing +- Select the stream workspace/instance used in the test +- Go to the **Processors** tab +- Verify the processor appears with: + - **Name**: Matches the `ProcessorName` from the test input + - **State**: Matches the `State` in the template (CREATED, STARTED, or STOPPED) + - **Pipeline**: Click on the processor to view details and verify: + - Pipeline stages match the `Pipeline` configuration in the template + - Source connection name is correct + - Merge target connection, database, and collection are correct + +**Atlas CLI Verification:** +```bash +atlas streams processors describe \ + --instance \ + --projectId +``` +- Verify `id` field is present (matches CloudFormation `Id` attribute) +- Verify `name` matches `ProcessorName` +- Verify `state` matches `State` parameter +- Verify `pipeline` array matches the `Pipeline` JSON string + +#### 2. 
DLQ Configuration Verification (inputs_3) + +For processors with DLQ configuration: +- In Atlas UI: Verify DLQ settings are displayed in processor details +- Via Atlas CLI: Verify `options.dlq` object contains: + - `connectionName`: Matches `Options.Dlq.ConnectionName` + - `db`: Matches `Options.Dlq.Db` + - `coll`: Matches `Options.Dlq.Coll` + +#### 3. Backward Compatibility Testing + +Test both field names work correctly: +- **Test with `WorkspaceName`** (preferred field): + - Create processor using `WorkspaceName` parameter + - Verify processor is created successfully + - Verify both `WorkspaceName` and `InstanceName` are set in returned model (for primary identifier) +- **Test with `InstanceName`** (deprecated field): + - Create processor using `InstanceName` parameter + - Verify processor is created successfully + - Verify both `WorkspaceName` and `InstanceName` are set in returned model + - Verify `WorkspaceName` is automatically set from `InstanceName` for forward compatibility + +#### 4. State Transition Testing + +Test all valid state transitions: +- **Create with `State: CREATED`**: + - Verify processor is created in CREATED state + - Verify processor does not start processing automatically +- **Create with `State: STARTED`**: + - Verify processor is created and transitions to STARTED state + - Verify this is a long-running operation (may take several minutes) + - Verify callback-based state management handles the transition +- **Update state from CREATED to STARTED**: + - Verify processor stops (if needed) before update + - Verify processor starts after update completes + - Verify state transition is successful +- **Update state from STARTED to STOPPED**: + - Verify processor stops before update + - Verify processor remains stopped after update + - Verify state transition is successful + +#### 5. 
Timeout and Cleanup Behavior + +- **Verify `Timeouts.Create` is respected**: + - Set a short timeout (e.g., 1 minute) for a processor that takes longer to start + - Verify timeout is triggered after the specified duration +- **Verify `DeleteOnCreateTimeout` behavior**: + - When `DeleteOnCreateTimeout: true` and timeout occurs: + - Verify processor deletion is triggered + - Verify resource is cleaned up from Atlas + - When `DeleteOnCreateTimeout: false` and timeout occurs: + - Verify processor is not deleted + - Verify resource remains in Atlas (may be in partial state) + +#### 6. Primary Identifier Verification + +Verify all primary identifier fields are present in returned models: +- `ProjectId`: Always present +- `WorkspaceName`: Always present (set from `InstanceName` if needed) +- `InstanceName`: Always present (set from `WorkspaceName` if needed) +- `ProcessorName`: Always present +- `Profile`: Always present + +This is critical for CloudFormation to properly track the resource. + +#### 7. 
General CFN Resource Success Criteria + +Ensure general [CFN resource success criteria](../../../TESTING.md#success-criteria-when-testing-the-resource) for this resource is met: +- All CRUD operations work correctly +- Read-after-Create returns correct values +- Update operations preserve primary identifier +- Delete operations clean up resources +- Error handling is appropriate + + +## Important Links +- [API Documentation](https://www.mongodb.com/docs/api/doc/atlas-admin-api-v2/group/endpoint-streams) +- [Resource Usage Documentation](https://www.mongodb.com/docs/atlas/atlas-sp/overview/) + +## Unit Testing Locally + +The local tests are integrated with the AWS `sam local` and `cfn invoke` tooling features: + +``` +sam local start-lambda --skip-pull-image +``` +then in another shell: +```bash +repo_root=$(git rev-parse --show-toplevel) +cd ${repo_root}/cfn-resources/stream-processor +cfn invoke resource CREATE stream-processor.sample-cfn-request.json +cfn invoke resource DELETE stream-processor.sample-cfn-request.json +cd - +``` + +Both CREATE & DELETE tests must pass. 
+ +## Test Input Files + +The test directory contains the following input files: + +- `inputs_1_create.template.json` / `inputs_1_update.template.json`: Basic stream processor with WorkspaceName, CREATED state +- `inputs_2_create.template.json` / `inputs_2_update.template.json`: Stream processor with STARTED state, timeout configuration, and DeleteOnCreateTimeout +- `inputs_3_create.template.json` / `inputs_3_update.template.json`: Stream processor with InstanceName (backward compatibility) and DLQ options + +All input files respect: +- AWS-only behavior (no Azure/GCP-only parameters) +- Required fields: ProjectId, ProcessorName, Pipeline +- Backward compatibility: Supports both WorkspaceName and InstanceName +- Schema validation: All fields match the final CFN schema diff --git a/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh new file mode 100755 index 000000000..27796c305 --- /dev/null +++ b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh @@ -0,0 +1,311 @@ +#!/usr/bin/env bash +# cfn-test-create-inputs.sh +# +# This tool generates json files in the inputs/ for `cfn test`. 
+# + +set -o errexit +set -o nounset +set -o pipefail + +rm -rf inputs +mkdir inputs + +#set profile +profile="default" +if [ ${MONGODB_ATLAS_PROFILE+x} ]; then + echo "profile set to ${MONGODB_ATLAS_PROFILE}" + profile=${MONGODB_ATLAS_PROFILE} +fi + +projectName="${1:-$PROJECT_NAME}" +echo "$projectName" +projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | select(.name==$NAME) | .id') +if [ -z "$projectId" ]; then + projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') + + echo -e "Created project \"${projectName}\" with id: ${projectId}\n" +else + echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" +fi +echo -e "=====\nrun this command to clean up\n=====\nmongocli iam projects delete ${projectId} --force\n=====" + +# Create Stream Instance/Workspace (this is a LONG-RUNNING operation, can take 10-30+ minutes) +workspaceName="stream-workspace-$(date +%s)-$RANDOM" +cloudProvider="AWS" + +echo -e "Creating Stream Instance/Workspace \"${workspaceName}\" (this may take 10-30+ minutes)...\n" +atlas streams instances create "${workspaceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} +echo -e "Waiting for Stream Instance/Workspace \"${workspaceName}\" to be ready...\n" +# Poll until the stream instance is ready (watch command doesn't exist for stream instances) +while true; do + hostnames=$(atlas streams instances describe "${workspaceName}" --projectId "${projectId}" --output json 2>/dev/null | jq -r '.hostnames[]? 
// empty' 2>/dev/null | head -1) + if [ -n "$hostnames" ]; then + echo -e "Stream Instance/Workspace \"${workspaceName}\" is ready\n" + break + fi + sleep 10 +done + +# For inputs_3 (DLQ testing), we need a cluster and stream connection +# Create cluster for DLQ connection (if needed) +clusterName="cluster-$(date +%s)-$RANDOM" +connectionName="stream-connection-$(date +%s)-$RANDOM" + +echo -e "Creating Cluster \"${clusterName}\" for DLQ connection...\n" +atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --provider AWS --region US_EAST_1 --members 3 --tier M10 --diskSizeGB 10 --output=json +atlas clusters watch "${clusterName}" --projectId "${projectId}" +echo -e "Created Cluster \"${clusterName}\"\n" + +echo -e "Creating Stream Connection \"${connectionName}\" for DLQ...\n" +# Create temporary JSON file for connection configuration using jq (consistent with rest of script) +connectionConfig=$(mktemp).json +jq -n \ + --arg type "Cluster" \ + --arg clusterName "${clusterName}" \ + '{ + "type": $type, + "clusterName": $clusterName, + "dbRoleToExecute": { + "role": "atlasAdmin", + "type": "BUILT_IN" + } + }' > "${connectionConfig}" +atlas streams connections create "${connectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${connectionConfig}" \ + --output=json +rm -f "${connectionConfig}" +echo -e "Created Stream Connection \"${connectionName}\"\n" + +# Create Sample connection for inputs_1 and inputs_2 (sample_stream_solar) +sampleConnectionName="sample_stream_solar" +echo -e "Creating Sample Stream Connection \"${sampleConnectionName}\" for inputs_1 and inputs_2...\n" +sampleConnectionConfig=$(mktemp).json +jq -n \ + --arg type "Sample" \ + '{ + "type": $type + }' > "${sampleConnectionConfig}" +# Check if connection already exists +if atlas streams connections describe "${sampleConnectionName}" --projectId "${projectId}" --instance "${workspaceName}" --output json >/dev/null 2>&1; then + echo "Sample 
connection \"${sampleConnectionName}\" already exists, skipping creation" +else + atlas streams connections create "${sampleConnectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${sampleConnectionConfig}" \ + --output=json + echo -e "Created Sample Stream Connection \"${sampleConnectionName}\"\n" +fi +rm -f "${sampleConnectionConfig}" + +# Reuse the Cluster connection from inputs_3 for inputs_1 and inputs_2 sink (saves time/resources) +# No need to create a separate cluster - we'll use the same connectionName + +# Create Kafka connections for inputs_4 and inputs_5 (Kafka to Cluster and Cluster to Kafka) +# Using placeholder values matching Terraform tests (as per MongoDB team guidance) +kafkaSourceConnectionName="KafkaConnectionSrc-$(date +%s)-$RANDOM" +kafkaSinkConnectionName="KafkaConnectionDest-$(date +%s)-$RANDOM" + +echo -e "Creating Kafka Source Connection \"${kafkaSourceConnectionName}\" for inputs_4...\n" +kafkaSourceConnectionConfig=$(mktemp).json +jq -n \ + --arg type "Kafka" \ + --arg bootstrapServers "localhost:9092,localhost:9092" \ + --arg mechanism "PLAIN" \ + --arg username "user" \ + --arg password "rawpassword" \ + --arg protocol "SASL_PLAINTEXT" \ + '{ + "type": $type, + "bootstrapServers": $bootstrapServers, + "authentication": { + "mechanism": $mechanism, + "username": $username, + "password": $password + }, + "security": { + "protocol": $protocol + }, + "config": { + "auto.offset.reset": "earliest" + }, + "networking": { + "access": { + "type": "PUBLIC" + } + } + }' > "${kafkaSourceConnectionConfig}" +atlas streams connections create "${kafkaSourceConnectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${kafkaSourceConnectionConfig}" \ + --output=json +rm -f "${kafkaSourceConnectionConfig}" +echo -e "Created Kafka Source Connection \"${kafkaSourceConnectionName}\"\n" + +echo -e "Creating Kafka Sink Connection \"${kafkaSinkConnectionName}\" for inputs_5...\n" 
+kafkaSinkConnectionConfig=$(mktemp).json +jq -n \ + --arg type "Kafka" \ + --arg bootstrapServers "localhost:9092,localhost:9092" \ + --arg mechanism "PLAIN" \ + --arg username "user" \ + --arg password "rawpassword" \ + --arg protocol "SASL_PLAINTEXT" \ + '{ + "type": $type, + "bootstrapServers": $bootstrapServers, + "authentication": { + "mechanism": $mechanism, + "username": $username, + "password": $password + }, + "security": { + "protocol": $protocol + }, + "config": { + "auto.offset.reset": "earliest" + }, + "networking": { + "access": { + "type": "PUBLIC" + } + } + }' > "${kafkaSinkConnectionConfig}" +atlas streams connections create "${kafkaSinkConnectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${kafkaSinkConnectionConfig}" \ + --output=json +rm -f "${kafkaSinkConnectionConfig}" +echo -e "Created Kafka Sink Connection \"${kafkaSinkConnectionName}\"\n" + +# Generate input files +# Reuse connectionName from inputs_3 for inputs_1 and inputs_2 sink (saves creating another cluster) +# Also set InstanceName from WorkspaceName for primary identifier (both fields required) +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg sink_connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ + "$(dirname "$0")/inputs_1_create.template.json" >"inputs/inputs_1_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg sink_connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ + "$(dirname "$0")/inputs_1_update.template.json" 
>"inputs/inputs_1_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg sink_connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ + "$(dirname "$0")/inputs_2_create.template.json" >"inputs/inputs_2_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg sink_connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ + "$(dirname "$0")/inputs_2_update.template.json" >"inputs/inputs_2_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Options.Dlq.ConnectionName?|=$connection_name + | .Pipeline?|=gsub("CONNECTION_NAME_PLACEHOLDER"; $connection_name)' \ + "$(dirname "$0")/inputs_3_create.template.json" >"inputs/inputs_3_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Options.Dlq.ConnectionName?|=$connection_name + | .Pipeline?|=gsub("CONNECTION_NAME_PLACEHOLDER"; $connection_name)' \ + "$(dirname "$0")/inputs_3_update.template.json" >"inputs/inputs_3_update.json" + +# Generate inputs_4 (Kafka to Cluster) - using Kafka source and Cluster sink +jq --arg workspace_name 
"$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg kafka_source "$kafkaSourceConnectionName" \ + --arg cluster_sink "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("KAFKA_SOURCE_CONNECTION_PLACEHOLDER"; $kafka_source) + | .Pipeline?|=gsub("CLUSTER_SINK_CONNECTION_PLACEHOLDER"; $cluster_sink)' \ + "$(dirname "$0")/inputs_4_create.template.json" >"inputs/inputs_4_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg kafka_source "$kafkaSourceConnectionName" \ + --arg cluster_sink "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("KAFKA_SOURCE_CONNECTION_PLACEHOLDER"; $kafka_source) + | .Pipeline?|=gsub("CLUSTER_SINK_CONNECTION_PLACEHOLDER"; $cluster_sink)' \ + "$(dirname "$0")/inputs_4_update.template.json" >"inputs/inputs_4_update.json" + +# Generate inputs_5 (Cluster to Kafka) - using Cluster source and Kafka sink +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg cluster_source "$connectionName" \ + --arg kafka_sink "$kafkaSinkConnectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("CLUSTER_SOURCE_CONNECTION_PLACEHOLDER"; $cluster_source) + | .Pipeline?|=gsub("KAFKA_SINK_CONNECTION_PLACEHOLDER"; $kafka_sink)' \ + "$(dirname "$0")/inputs_5_create.template.json" >"inputs/inputs_5_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg cluster_source "$connectionName" \ + --arg kafka_sink "$kafkaSinkConnectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | 
.WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("CLUSTER_SOURCE_CONNECTION_PLACEHOLDER"; $cluster_source) + | .Pipeline?|=gsub("KAFKA_SINK_CONNECTION_PLACEHOLDER"; $kafka_sink)' \ + "$(dirname "$0")/inputs_5_update.template.json" >"inputs/inputs_5_update.json" + +echo -e "Test input files generated successfully in inputs/ directory\n" diff --git a/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh new file mode 100755 index 000000000..65212620f --- /dev/null +++ b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh @@ -0,0 +1,92 @@ +#!/usr/bin/env bash +# cfn-test-delete-inputs.sh +# +# This tool deletes the mongodb resources used for `cfn test` as inputs. +# + +set -euo pipefail + +function usage { + echo "usage:$0 " +} + +projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) +workspaceName=$(jq -r '.WorkspaceName // .InstanceName' ./inputs/inputs_1_create.json) +processorName1=$(jq -r '.ProcessorName' ./inputs/inputs_1_create.json) +processorName2=$(jq -r '.ProcessorName' ./inputs/inputs_2_create.json) +processorName3=$(jq -r '.ProcessorName' ./inputs/inputs_3_create.json) + +# Delete stream processors (if they exist) +for processorName in "$processorName1" "$processorName2" "$processorName3"; do + if atlas streams processors delete "${processorName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --force 2>/dev/null; then + echo "deleted stream processor with name ${processorName}" + else + echo "failed to delete or stream processor '${processorName}' does not exist" + fi +done + +# Delete Sample connection (sample_stream_solar) if it exists +sampleConnectionName="sample_stream_solar" +if atlas streams connections delete "${sampleConnectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --force 2>/dev/null; then + echo "deleted sample stream connection with name 
${sampleConnectionName}" +else + echo "failed to delete or sample stream connection '${sampleConnectionName}' does not exist" +fi + +# Get connection name from inputs_3 if it exists +if [ -f "./inputs/inputs_3_create.json" ]; then + connectionName=$(jq -r '.Options.Dlq.ConnectionName // empty' ./inputs/inputs_3_create.json) + if [ -n "$connectionName" ]; then + if atlas streams connections delete "${connectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --force 2>/dev/null; then + echo "deleted stream connection with name ${connectionName}" + else + echo "failed to delete or stream connection '${connectionName}' does not exist" + fi + fi +fi + +# Delete all clusters in the project (created for DLQ testing) +# The cluster name is not stored in input JSON, so we list and delete all clusters +# Clusters must be deleted before stream instance and project to avoid dependency conflicts +echo "Checking for clusters to delete in project ${projectId}..." +clusterList=$(atlas clusters list --projectId "${projectId}" --output json 2>/dev/null | jq -r '.results[]?.name // empty' 2>/dev/null || echo "") +if [ -n "$clusterList" ]; then + while IFS= read -r clusterName; do + if [ -n "$clusterName" ] && [ "$clusterName" != "null" ] && [ "$clusterName" != "" ]; then + if atlas cluster delete "${clusterName}" --projectId "${projectId}" --force 2>/dev/null; then + echo "deleting cluster with name ${clusterName}" + # Wait for cluster deletion to complete + atlas cluster watch "${clusterName}" --projectId "${projectId}" 2>/dev/null || true + else + echo "failed to delete or cluster '${clusterName}' does not exist" + fi + fi + done <<< "$clusterList" +else + echo "No clusters found in project" +fi + +# Delete stream instance/workspace (after clusters are deleted) +if atlas streams instances delete "${workspaceName}" --projectId "${projectId}" --force 2>/dev/null; then + echo "deleting stream instance/workspace with name ${workspaceName}" + # Wait for 
deletion to complete + atlas streams instances watch "${workspaceName}" --projectId "${projectId}" 2>/dev/null || true +else + echo "failed to delete or stream instance/workspace '${workspaceName}' does not exist" +fi + +#delete project +if atlas projects delete "$projectId" --force 2>/dev/null; then + echo "$projectId project deletion OK" +else + (echo "Failed cleaning project:$projectId" && exit 1) +fi diff --git a/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh b/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh new file mode 100755 index 000000000..4b795316e --- /dev/null +++ b/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +# This tool generates the resources and json files in the inputs/ for `cfn test`. +set -o errexit +set -o nounset +set -o pipefail + +projectName="cfn-test-bot-$(date +%s)-$RANDOM" + +# create project +projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') + +echo "projectId: $projectId" +echo "projectName: $projectName" + +./test/cfn-test-create-inputs.sh "$projectName" diff --git a/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh new file mode 100755 index 000000000..71286ddfb --- /dev/null +++ b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +# This tool deletes the mongodb resources used for `cfn test` as inputs. 
+set -o errexit +set -o nounset +set -o pipefail + +projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) + +# delete project +if atlas projects delete "$projectId" --force; then + echo "$projectId project deletion OK" +else + (echo "Failed cleaning project: $projectId" && exit 1) +fi diff --git a/cfn-resources/stream-processor/test/inputs_1_create.template.json b/cfn-resources/stream-processor/test/inputs_1_create.template.json new file mode 100644 index 000000000..82b52ad69 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_1_create.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-1", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_1_update.template.json b/cfn-resources/stream-processor/test/inputs_1_update.template.json new file mode 100644 index 000000000..82b52ad69 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_1_update.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-1", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_2_create.template.json b/cfn-resources/stream-processor/test/inputs_2_create.template.json new file mode 100644 index 000000000..6d805b84b --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_2_create.template.json @@ -0,0 +1,12 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-2", + "Pipeline": "[{\"$source\": 
{\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "STARTED", + "Timeouts": { + "Create": "25m" + }, + "DeleteOnCreateTimeout": true +} diff --git a/cfn-resources/stream-processor/test/inputs_2_update.template.json b/cfn-resources/stream-processor/test/inputs_2_update.template.json new file mode 100644 index 000000000..0a21a02be --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_2_update.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-2", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "STOPPED" +} diff --git a/cfn-resources/stream-processor/test/inputs_3_create.template.json b/cfn-resources/stream-processor/test/inputs_3_create.template.json new file mode 100644 index 000000000..6a1de915b --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_3_create.template.json @@ -0,0 +1,16 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-3", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "CREATED", + "Options": { + "Dlq": { + "Coll": "dlq-collection", + "ConnectionName": "", + "Db": "dlq-database" + } + } +} diff --git a/cfn-resources/stream-processor/test/inputs_3_update.template.json b/cfn-resources/stream-processor/test/inputs_3_update.template.json new file mode 100644 index 000000000..0b1c1fba6 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_3_update.template.json @@ -0,0 +1,16 @@ +{ + "Profile": "default", + 
"ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-3", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "State": "CREATED", + "Options": { + "Dlq": { + "Coll": "dlq-collection-updated", + "ConnectionName": "", + "Db": "dlq-database-updated" + } + } +} diff --git a/cfn-resources/stream-processor/test/inputs_4_create.template.json b/cfn-resources/stream-processor/test/inputs_4_create.template.json new file mode 100644 index 000000000..2ee322dc9 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_4_create.template.json @@ -0,0 +1,9 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-4-kafka-to-cluster", + "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": \"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages\", \"timeseries\": {\"timeField\": \"ts\"}}}]", + "State": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_4_update.template.json b/cfn-resources/stream-processor/test/inputs_4_update.template.json new file mode 100644 index 000000000..afbe861ba --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_4_update.template.json @@ -0,0 +1,9 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-4-kafka-to-cluster", + "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": \"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages_updated\", \"timeseries\": {\"timeField\": \"ts\"}}}]", + "State": "CREATED" +} diff --git 
a/cfn-resources/stream-processor/test/inputs_5_create.template.json b/cfn-resources/stream-processor/test/inputs_5_create.template.json new file mode 100644 index 000000000..f0956470c --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_5_create.template.json @@ -0,0 +1,9 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-5-cluster-to-kafka", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}]", + "State": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_5_update.template.json b/cfn-resources/stream-processor/test/inputs_5_update.template.json new file mode 100644 index 000000000..558e63ead --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_5_update.template.json @@ -0,0 +1,9 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-5-cluster-to-kafka", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic_updated\"}}]", + "State": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json b/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json new file mode 100644 index 000000000..bdf7671df --- /dev/null +++ b/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json @@ -0,0 +1,11 @@ +{ + "desiredResourceState": { + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "sample-processor", + "Pipeline": "[{\"$match\": {\"status\": \"active\"}}]", + "State": "CREATED" + }, + "previousResourceState": {} +} diff --git a/cfn-resources/util/constants/constants.go 
b/cfn-resources/util/constants/constants.go index 4cd57df70..d9de1bd9d 100644 --- a/cfn-resources/util/constants/constants.go +++ b/cfn-resources/util/constants/constants.go @@ -157,4 +157,7 @@ const ( ConnectionName = "ConnectionName" Type = "Type" StreamConfig = "StreamConfig" + + ProcessorName = "ProcessorName" + Pipeline = "Pipeline" ) diff --git a/examples/atlas-streams/stream-processor/README.md b/examples/atlas-streams/stream-processor/README.md new file mode 100644 index 000000000..57c14cd76 --- /dev/null +++ b/examples/atlas-streams/stream-processor/README.md @@ -0,0 +1,191 @@ +# How to create a MongoDB::Atlas::StreamProcessor + +## Step 1: Activate the stream processor resource in cloudformation + +Step a: Create Role using [execution-role.yaml](../../execution-role.yaml) in examples folder. + +Step b: Search for Mongodb::Atlas::StreamProcessor resource. + + (CloudFormation > Public extensions > choose 'Third party' > Search with " Execution name prefix = MongoDB " ) + +Step c: Select and activate +Enter the RoleArn that is created in step 1. + +Your StreamProcessor Resource is ready to use. + +## Step 2: Choose a template based on your use case + +### Example 1: Basic Stream Processor ([stream-processor.json](stream-processor.json)) + +Creates a stream processor that reads from a source connection and merges data into a cluster connection. This example uses `$merge` to write data to a regular MongoDB collection. + +**Use cases:** + +- Sample data to cluster (e.g., using `sample_stream_solar`) +- Cluster to cluster data streaming +- Simple data replication + +**Parameters:** + +1. **ProjectId** - Atlas Project Id (24 hexadecimal characters) +2. **WorkspaceName** - Name of your stream instance/workspace +3. **ProcessorName** - Unique name for the stream processor +4. **SourceConnectionName** - Name of the source connection: + - For sample data: `sample_stream_solar` + - For cluster source: Your cluster connection name +5. 
**SinkConnectionName** - Name of the sink cluster connection (must be a cluster connection) +6. **SinkDatabase** - Target database name (optional, default: `test`) +7. **SinkCollection** - Target collection name (optional, default: `output`) +8. **DesiredState** - Desired state of the processor: `CREATED`, `STOPPED`, or `STARTED` (optional, default: `CREATED`) +9. **Profile** - Secret Manager Profile for Atlas credentials (optional, default: `default`) + +**Pipeline stages:** + +- `$source` - Reads from the source connection +- `$merge` - Merges data into the target cluster connection (for regular collections) + +### Example 2: Stream Processor with Dead Letter Queue ([stream-processor-dlq.json](stream-processor-dlq.json)) + +Creates a stream processor with Dead Letter Queue (DLQ) configuration. Failed messages are automatically sent to a DLQ collection for error handling and debugging. + +**Additional Parameters (beyond Example 1):** + +10. **DlqConnectionName** - Name of the DLQ connection (must be a cluster connection) +11. **DlqDatabase** - DLQ database name (optional, default: `dlq`) +12. **DlqCollection** - DLQ collection name (optional, default: `dlq-messages`) + +**Pipeline stages:** + +- `$source` - Reads from the source connection +- `$merge` - Merges data into the target cluster connection (for regular collections) +- **Options.Dlq** - Configured to capture failed messages + +### Example 3: Kafka to Cluster Stream Processor ([stream-processor-kafka-to-cluster.json](stream-processor-kafka-to-cluster.json)) + +Creates a stream processor that reads from a Kafka topic and writes to a cluster connection as a time-series collection. + +**Use cases:** + +- Ingesting data from Kafka into MongoDB Atlas +- Real-time data pipeline from Kafka to MongoDB +- Event streaming from Kafka to time-series collections + +**Parameters:** + +1. **ProjectId** - Atlas Project Id (24 hexadecimal characters) +2. **WorkspaceName** - Name of your stream instance/workspace +3. 
**ProcessorName** - Unique name for the stream processor +4. **KafkaSourceConnectionName** - Name of the Kafka source connection +5. **KafkaTopic** - Name of the Kafka topic to read from +6. **SinkConnectionName** - Name of the sink cluster connection (must be a cluster connection) +7. **SinkDatabase** - Target database name (optional, default: `kafka`) +8. **SinkCollection** - Target collection name (optional, default: `kafka_messages`) +9. **DesiredState** - Must be `CREATED` or `STOPPED` (cannot be `STARTED` without a working Kafka cluster) +10. **Profile** - Secret Manager Profile for Atlas credentials (optional, default: `default`) + +**Pipeline stages:** + +- `$source` - Reads from Kafka topic (requires `connectionName` and `topic`) +- `$emit` - Writes to cluster connection as time-series collection + +**Important Notes:** + +- ⚠️ **This processor must be created in `CREATED` state** - it cannot be started (`STARTED`) without a working Kafka cluster that is accessible from MongoDB Atlas Stream Processing infrastructure +- The processor will fail if you attempt to start it without a valid Kafka connection +- To use this processor with a real Kafka cluster, first ensure your Kafka connection is properly configured and accessible, then update the processor's `DesiredState` to `STARTED` + +### Example 4: Cluster to Kafka Stream Processor ([stream-processor-cluster-to-kafka.json](stream-processor-cluster-to-kafka.json)) + +Creates a stream processor that reads from a cluster connection and writes to a Kafka topic. + +**Use cases:** + +- Streaming MongoDB data to Kafka +- Real-time data export from Atlas to Kafka +- Event streaming from MongoDB to Kafka topics + +**Parameters:** + +1. **ProjectId** - Atlas Project Id (24 hexadecimal characters) +2. **WorkspaceName** - Name of your stream instance/workspace +3. **ProcessorName** - Unique name for the stream processor +4. **SourceConnectionName** - Name of the source cluster connection +5. 
**KafkaSinkConnectionName** - Name of the Kafka sink connection +6. **KafkaTopic** - Name of the Kafka topic to write to +7. **DesiredState** - Must be `CREATED` or `STOPPED` (cannot be `STARTED` without a working Kafka cluster) +8. **Profile** - Secret Manager Profile for Atlas credentials (optional, default: `default`) + +**Pipeline stages:** + +- `$source` - Reads from cluster connection +- `$emit` - Writes to Kafka topic (requires `connectionName` and `topic`) + +**Important Notes:** + +- ⚠️ **This processor must be created in `CREATED` state** - it cannot be started (`STARTED`) without a working Kafka cluster that is accessible from MongoDB Atlas Stream Processing infrastructure +- The processor will fail if you attempt to start it without a valid Kafka connection +- To use this processor with a real Kafka cluster, first ensure your Kafka connection is properly configured and accessible, then update the processor's `DesiredState` to `STARTED` + +## Pipeline Stage Options + +### $source + +Reads data from a source connection. Supported sources: + +- **Sample connections**: `sample_stream_solar` (for testing) +- **Cluster connections**: Read from MongoDB collections +- **Kafka connections**: Read from Kafka topics (requires `topic` parameter) + +### $emit + +Writes data to a target connection. Options: + +- **Cluster**: Write to MongoDB collections + - `connectionName` - Target cluster connection name + - `db` - Target database + - `coll` - Target collection + - `timeseries` (optional) - For time-series collections + - `timeField` - Field name containing timestamp +- **Kafka**: Write to Kafka topics + - `connectionName` - Target Kafka connection name + - `topic` - Target Kafka topic name + +### $merge + +Merges data into regular MongoDB collections. Use `$merge` for standard collections (non-timeseries). 
+ +- **Cluster**: Merge into MongoDB collections + - `connectionName` - Target cluster connection name + - `db` - Target database + - `coll` - Target collection + - `into` - Object containing connection, database, and collection details + +**Note:** Use `$merge` for regular collections. Use `$emit` only for time-series collections (requires `timeseries` option). + +## State Management + +The `DesiredState` parameter controls the desired processor lifecycle: + +- **CREATED** - Processor is created but not running (default) +- **STARTED** - Processor is actively processing data +- **STOPPED** - Processor is stopped (can be restarted) + +The `State` output (read-only) reflects the actual current state of the processor as returned by the Atlas API. Common states include `CREATED`, `STARTED`, `STOPPED`, and `FAILED`. + +**Note:** When updating a processor, if the current state is `STARTED`, the processor will be stopped, updated, and then restarted if the `DesiredState` is `STARTED`. + +## Kafka Integration Notes + +When working with Kafka-based stream processors: + +1. **Connection Validation**: The stream processor validates that the connection name exists in the workspace, but does not validate Kafka connectivity at creation time. + +2. **State Management**: Kafka processors should be created in `CREATED` state. They can only be started (`STARTED`) when: + - A valid Kafka connection exists + - The Kafka cluster is accessible from MongoDB Atlas Stream Processing infrastructure + - Authentication credentials are correct + - Network connectivity is established (public access or VPC peering) + +3. **Failure Handling**: If you attempt to start a Kafka processor without a working Kafka cluster, the processor will enter `FAILED` state. You can check the processor state via the `ProcessorState` output. + +4. 
**Testing**: The examples provided (Examples 3 and 4) are designed to be created successfully even without a working Kafka cluster, allowing you to validate the CloudFormation template structure. To actually process data, you'll need a properly configured Kafka cluster. diff --git a/examples/atlas-streams/stream-processor/stream-processor-cluster-to-kafka.json b/examples/atlas-streams/stream-processor/stream-processor-cluster-to-kafka.json new file mode 100644 index 000000000..62e19dbaa --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor-cluster-to-kafka.json @@ -0,0 +1,93 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor that reads from a cluster connection and writes to a Kafka connection. Note: This processor is created in CREATED state and cannot be started without a working Kafka cluster.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Description": "Name of the source stream connection (must be a cluster connection)" + }, + "KafkaSinkConnectionName": { + "Type": "String", + "Description": "Name of the Kafka sink stream connection" + }, + "KafkaTopic": { + "Type": "String", + "Description": "Name of the Kafka topic to write to" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor. 
Must be CREATED (cannot be STARTED without a working Kafka cluster)", + "AllowedValues": ["CREATED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$emit\": {\"connectionName\": \"${KafkaSink}\", \"topic\": \"${KafkaTopic}\"}}]", + { + "SourceConnection": { + "Ref": "SourceConnectionName" + }, + "KafkaSink": { + "Ref": "KafkaSinkConnectionName" + }, + "KafkaTopic": { + "Ref": "KafkaTopic" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} diff --git a/examples/atlas-streams/stream-processor/stream-processor-dlq.json b/examples/atlas-streams/stream-processor/stream-processor-dlq.json new file mode 100644 index 000000000..eff1e9158 --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor-dlq.json @@ -0,0 +1,129 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor with Dead Letter Queue (DLQ) configuration. 
The processor uses a source connection and merges data into a cluster connection, with failed messages sent to a DLQ collection.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Description": "Name of the source stream connection" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "test", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "output", + "Description": "Name of the collection for the sink connection" + }, + "DlqConnectionName": { + "Type": "String", + "Description": "Name of the DLQ connection (must be a cluster connection)" + }, + "DlqDatabase": { + "Type": "String", + "Default": "dlq", + "Description": "Name of the database for the DLQ" + }, + "DlqCollection": { + "Type": "String", + "Default": "dlq-messages", + "Description": "Name of the collection for the DLQ" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor", + "AllowedValues": ["CREATED", "STARTED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": 
"ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\"}}}]", + { + "SourceConnection": { + "Ref": "SourceConnectionName" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + }, + "Options": { + "Dlq": { + "ConnectionName": { + "Ref": "DlqConnectionName" + }, + "Db": { + "Ref": "DlqDatabase" + }, + "Coll": { + "Ref": "DlqCollection" + } + } + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} diff --git a/examples/atlas-streams/stream-processor/stream-processor-kafka-to-cluster.json b/examples/atlas-streams/stream-processor/stream-processor-kafka-to-cluster.json new file mode 100644 index 000000000..b9ba30e2e --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor-kafka-to-cluster.json @@ -0,0 +1,109 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor that reads from a Kafka connection and writes to a cluster connection. 
Note: This processor is created in CREATED state and cannot be started without a working Kafka cluster.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "KafkaSourceConnectionName": { + "Type": "String", + "Description": "Name of the Kafka source stream connection" + }, + "KafkaTopic": { + "Type": "String", + "Description": "Name of the Kafka topic to read from" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "kafka", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "kafka_messages", + "Description": "Name of the collection for the sink connection" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor. 
Must be CREATED (cannot be STARTED without a working Kafka cluster)", + "AllowedValues": ["CREATED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${KafkaSource}\", \"topic\": \"${KafkaTopic}\"}}, {\"$emit\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\", \"timeseries\": {\"timeField\": \"ts\"}}}]", + { + "KafkaSource": { + "Ref": "KafkaSourceConnectionName" + }, + "KafkaTopic": { + "Ref": "KafkaTopic" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} diff --git a/examples/atlas-streams/stream-processor/stream-processor.json b/examples/atlas-streams/stream-processor/stream-processor.json new file mode 100644 index 000000000..2093b7ebf --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor.json @@ -0,0 +1,102 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor for a given stream workspace in the specified project. 
The processor reads from a source connection (sample data, cluster, or Kafka) and merges data into a cluster connection collection.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Description": "Name of the source stream connection (e.g., sample_stream_solar for sample data, or a cluster/kafka connection name)" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "test", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "output", + "Description": "Name of the collection for the sink connection" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor", + "AllowedValues": ["CREATED", "STARTED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\"}}}]", + { + "SourceConnection": { + "Ref": 
"SourceConnectionName" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} From f8b0e65ee8cd4317dc9f5e242ebc3e98df73bca5 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Tue, 13 Jan 2026 17:04:32 -0500 Subject: [PATCH 02/10] CLOUDP-368428-Stream-Processor Version upgrade from master --- .../cmd/resource/callbacks.go | 2 +- .../cmd/resource/callbacks_test.go | 481 ---------------- .../stream-processor/cmd/resource/helpers.go | 2 +- .../cmd/resource/helpers_test.go | 325 ----------- .../stream-processor/cmd/resource/mappings.go | 2 +- .../cmd/resource/mappings_test.go | 2 +- .../stream-processor/cmd/resource/resource.go | 2 +- .../cmd/resource/resource_test.go | 513 ------------------ 8 files changed, 5 insertions(+), 1324 deletions(-) delete mode 100644 cfn-resources/stream-processor/cmd/resource/callbacks_test.go delete mode 100644 cfn-resources/stream-processor/cmd/resource/helpers_test.go delete mode 100644 cfn-resources/stream-processor/cmd/resource/resource_test.go diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go b/cfn-resources/stream-processor/cmd/resource/callbacks.go index 44c255c15..198386de8 100644 --- a/cfn-resources/stream-processor/cmd/resource/callbacks.go +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -19,7 +19,7 @@ import ( "fmt" "maps" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" diff --git 
a/cfn-resources/stream-processor/cmd/resource/callbacks_test.go b/cfn-resources/stream-processor/cmd/resource/callbacks_test.go deleted file mode 100644 index 7958b181e..000000000 --- a/cfn-resources/stream-processor/cmd/resource/callbacks_test.go +++ /dev/null @@ -1,481 +0,0 @@ -// Copyright 2026 MongoDB Inc -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package resource_test - -import ( - "context" - "net/http" - "testing" - "time" - - "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" - "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312010/admin" - "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" -) - -var ( - baseModel = &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - } - baseCallbackCtx = &resource.CallbackData{ - ProjectID: "507f1f77bcf86cd799439011", - WorkspaceOrInstanceName: "workspace-1", - ProcessorName: "processor-1", - StartTime: time.Now().Format(time.RFC3339), - TimeoutDuration: "20m", - } -) - -func TestIsCallback(t *testing.T) { - testCases := map[string]struct { - req handler.Request - expectedResult bool - }{ - "isCallback": { - req: handler.Request{ - CallbackContext: 
map[string]any{"callbackStreamProcessor": true}, - }, - expectedResult: true, - }, - "notCallback": { - req: handler.Request{ - CallbackContext: map[string]any{}, - }, - expectedResult: false, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expectedResult, resource.IsCallback(&tc.req)) - }) - } -} - -func TestGetCallbackData(t *testing.T) { - testCases := map[string]struct { - expectedResult *resource.CallbackData - req handler.Request - }{ - "allFieldsPresent": { - req: handler.Request{ - CallbackContext: map[string]any{ - "projectID": "507f1f77bcf86cd799439011", - "workspaceName": "workspace-1", - "processorName": "processor-1", - "needsStarting": true, - "desiredState": "STARTED", - "startTime": "2024-01-01T00:00:00Z", - "timeoutDuration": "20m", - "deleteOnCreateTimeout": true, - }, - }, - expectedResult: &resource.CallbackData{ - ProjectID: "507f1f77bcf86cd799439011", - WorkspaceOrInstanceName: "workspace-1", - ProcessorName: "processor-1", - NeedsStarting: true, - DesiredState: "STARTED", - StartTime: "2024-01-01T00:00:00Z", - TimeoutDuration: "20m", - DeleteOnCreateTimeout: true, - }, - }, - "partialFields": { - req: handler.Request{ - CallbackContext: map[string]any{ - "projectID": "507f1f77bcf86cd799439011", - "workspaceName": "workspace-1", - "processorName": "processor-1", - }, - }, - expectedResult: &resource.CallbackData{ - ProjectID: "507f1f77bcf86cd799439011", - WorkspaceOrInstanceName: "workspace-1", - ProcessorName: "processor-1", - }, - }, - "emptyContext": { - req: handler.Request{ - CallbackContext: map[string]any{}, - }, - expectedResult: &resource.CallbackData{}, - }, - "typeAssertionFailures": { - req: handler.Request{ - CallbackContext: map[string]any{ - "projectID": 123, - "workspaceName": true, - "processorName": []string{"invalid"}, - "needsStarting": "not a bool", - "desiredState": 456, - "startTime": struct{}{}, - "timeoutDuration": nil, - "deleteOnCreateTimeout": "not a bool", - }, 
- }, - expectedResult: &resource.CallbackData{}, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - assert.Equal(t, tc.expectedResult, resource.GetCallbackData(tc.req)) - }) - } -} - -func TestValidateCallbackData(t *testing.T) { - validCtx := &resource.CallbackData{ - ProjectID: "507f1f77bcf86cd799439011", - WorkspaceOrInstanceName: "workspace-1", - ProcessorName: "processor-1", - } - - testCases := map[string]struct { - callbackCtx *resource.CallbackData - expectedMsgContain string - expectedError bool - }{ - "valid": { - callbackCtx: validCtx, - expectedError: false, - }, - "missingProjectID": { - callbackCtx: &resource.CallbackData{WorkspaceOrInstanceName: "workspace-1", ProcessorName: "processor-1"}, - expectedError: true, - expectedMsgContain: "Missing required values", - }, - "missingWorkspaceName": { - callbackCtx: &resource.CallbackData{ProjectID: "507f1f77bcf86cd799439011", ProcessorName: "processor-1"}, - expectedError: true, - expectedMsgContain: "Missing required values", - }, - "missingProcessorName": { - callbackCtx: &resource.CallbackData{ProjectID: "507f1f77bcf86cd799439011", WorkspaceOrInstanceName: "workspace-1"}, - expectedError: true, - expectedMsgContain: "Missing required values", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - peErr := resource.ValidateCallbackData(tc.callbackCtx) - if tc.expectedError { - require.NotNil(t, peErr) - assert.Contains(t, peErr.Message, tc.expectedMsgContain) - } else { - require.Nil(t, peErr) - } - }) - } -} - -func TestBuildCallbackContext(t *testing.T) { - testCases := map[string]struct { - additionalFields map[string]any - validateFunc func(t *testing.T, ctx map[string]any) - }{ - "basic": { - additionalFields: map[string]any{}, - validateFunc: func(t *testing.T, ctx map[string]any) { - t.Helper() - assert.True(t, ctx["callbackStreamProcessor"].(bool)) - assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) - assert.Equal(t, 
"workspace-1", ctx["workspaceName"]) - assert.Equal(t, "processor-1", ctx["processorName"]) - }, - }, - "withAdditionalFields": { - additionalFields: map[string]any{"needsStarting": true, "desiredState": "STARTED"}, - validateFunc: func(t *testing.T, ctx map[string]any) { - t.Helper() - assert.True(t, ctx["callbackStreamProcessor"].(bool)) - assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) - assert.True(t, ctx["needsStarting"].(bool)) - assert.Equal(t, "STARTED", ctx["desiredState"]) - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - ctx := resource.BuildCallbackContext("507f1f77bcf86cd799439011", "workspace-1", "processor-1", tc.additionalFields) - if tc.validateFunc != nil { - tc.validateFunc(t, ctx) - } - }) - } -} - -func TestHandleCreateCallback(t *testing.T) { - mockClient := &admin.APIClient{StreamsApi: mockadmin.NewStreamsApi(t)} - ctx := context.Background() - - timeoutCtx := func(deleteOnTimeout bool) *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.StartTime = time.Now().Add(-25 * time.Minute).Format(time.RFC3339) - ctx.DeleteOnCreateTimeout = deleteOnTimeout - return &ctx - } - - createMockProcessor := func(state string) *admin.StreamsProcessorWithStats { - return &admin.StreamsProcessorWithStats{Name: "processor-1", State: state} - } - - setupGetProcessor := func(m *mockadmin.StreamsApi, processor *admin.StreamsProcessorWithStats) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) - } - - setupStartProcessor := func(m *mockadmin.StreamsApi) { - startReq := admin.StartStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) - m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) - } - - testCases := 
map[string]struct { - currentModel *resource.Model - callbackCtx *resource.CallbackData - mockSetup func(*mockadmin.StreamsApi) - expectedStatus handler.Status - expectedMsg string - }{ - "timeoutExceededWithCleanup": { - currentModel: baseModel, - callbackCtx: timeoutCtx(true), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.DeleteStreamProcessorApiRequest{ApiService: m} - m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, nil) - }, - expectedStatus: handler.Failed, - expectedMsg: "Timeout reached", - }, - "timeoutExceededWithoutCleanup": { - currentModel: baseModel, - callbackCtx: timeoutCtx(false), - expectedStatus: handler.Failed, - expectedMsg: "Timeout reached", - }, - "createdStateNeedsStarting": { - currentModel: baseModel, - callbackCtx: func() *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.NeedsStarting = true - return &ctx - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.CreatedState)) - setupStartProcessor(m) - }, - expectedStatus: handler.InProgress, - expectedMsg: "Starting stream processor", - }, - "createdStateNoStarting": { - currentModel: baseModel, - callbackCtx: func() *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.NeedsStarting = false - return &ctx - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.CreatedState)) - }, - expectedStatus: handler.Success, - expectedMsg: "Create Completed", - }, - "startedState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.StartedState)) - }, - expectedStatus: handler.Success, - expectedMsg: "Create Completed", - }, - "creatingState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - 
setupGetProcessor(m, createMockProcessor(resource.CreatingState)) - }, - expectedStatus: handler.InProgress, - expectedMsg: "Creating stream processor", - }, - "failedState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.FailedState)) - }, - expectedStatus: handler.Failed, - expectedMsg: "FAILED state", - }, - "unexpectedState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, &admin.StreamsProcessorWithStats{Name: "processor-1", State: "UNKNOWN"}) - }, - expectedStatus: handler.Failed, - expectedMsg: "Unexpected state", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - mockStreamsAPI := mockadmin.NewStreamsApi(t) - if tc.mockSetup != nil { - tc.mockSetup(mockStreamsAPI) - } - mockClient.StreamsApi = mockStreamsAPI - - event, err := resource.HandleCreateCallback(ctx, mockClient, tc.currentModel, tc.callbackCtx) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - assert.Contains(t, event.Message, tc.expectedMsg) - }) - } -} - -func TestHandleUpdateCallback(t *testing.T) { - mockClient := &admin.APIClient{StreamsApi: mockadmin.NewStreamsApi(t)} - ctx := context.Background() - - createMockProcessor := func(state string) *admin.StreamsProcessorWithStats { - return &admin.StreamsProcessorWithStats{Name: "processor-1", State: state} - } - - setupGetProcessor := func(m *mockadmin.StreamsApi, processor *admin.StreamsProcessorWithStats) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) - } - - setupUpdateProcessor := func(m *mockadmin.StreamsApi, updatedState string) { - updateReq := 
admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(createMockProcessor(updatedState), &http.Response{StatusCode: 200}, nil) - } - - setupStartProcessor := func(m *mockadmin.StreamsApi) { - startReq := admin.StartStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) - m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) - } - - setupStopProcessor := func(m *mockadmin.StreamsApi) { - stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) - m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, nil) - } - - testCases := map[string]struct { - currentModel *resource.Model - callbackCtx *resource.CallbackData - mockSetup func(*mockadmin.StreamsApi) - expectedStatus handler.Status - expectedMsg string - }{ - "stoppedStateWithDesiredStarted": { - currentModel: &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr("workspace-1"), - Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), - }, - callbackCtx: func() *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.DesiredState = resource.StartedState - return &ctx - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.StoppedState)) - setupUpdateProcessor(m, resource.StoppedState) - setupStartProcessor(m) - }, - expectedStatus: handler.InProgress, - expectedMsg: "Starting stream processor", - }, - "startedStateWithDesiredStarted": { - currentModel: baseModel, - callbackCtx: func() *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.DesiredState = resource.StartedState - return &ctx - }(), - mockSetup: 
func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.StartedState)) - }, - expectedStatus: handler.Success, - expectedMsg: "Update Completed", - }, - "startedStateWithDesiredStopped": { - currentModel: baseModel, - callbackCtx: func() *resource.CallbackData { - ctx := *baseCallbackCtx - ctx.DesiredState = resource.StoppedState - return &ctx - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.StartedState)) - setupStopProcessor(m) - }, - expectedStatus: handler.InProgress, - expectedMsg: "Stopping stream processor", - }, - "failedState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, createMockProcessor(resource.FailedState)) - }, - expectedStatus: handler.Failed, - expectedMsg: "FAILED state", - }, - "defaultState": { - currentModel: baseModel, - callbackCtx: baseCallbackCtx, - mockSetup: func(m *mockadmin.StreamsApi) { - setupGetProcessor(m, &admin.StreamsProcessorWithStats{Name: "processor-1", State: "UNKNOWN"}) - }, - expectedStatus: handler.InProgress, - expectedMsg: "Updating stream processor", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - mockStreamsAPI := mockadmin.NewStreamsApi(t) - if tc.mockSetup != nil { - tc.mockSetup(mockStreamsAPI) - } - mockClient.StreamsApi = mockStreamsAPI - - event, err := resource.HandleUpdateCallback(ctx, mockClient, tc.currentModel, tc.callbackCtx) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - assert.Contains(t, event.Message, tc.expectedMsg) - }) - } -} diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go index 1bb7668aa..1576107b7 100644 --- a/cfn-resources/stream-processor/cmd/resource/helpers.go +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - 
"go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" diff --git a/cfn-resources/stream-processor/cmd/resource/helpers_test.go b/cfn-resources/stream-processor/cmd/resource/helpers_test.go deleted file mode 100644 index 77fb3a6a5..000000000 --- a/cfn-resources/stream-processor/cmd/resource/helpers_test.go +++ /dev/null @@ -1,325 +0,0 @@ -// Copyright 2026 MongoDB Inc -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource_test - -import ( - "errors" - "net/http" - "testing" - "time" - - "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" - "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312010/admin" -) - -func TestCopyIdentifyingFields(t *testing.T) { - testCases := map[string]struct { - resourceModel *resource.Model - currentModel *resource.Model - validateFunc func(t *testing.T, resourceModel *resource.Model) - }{ - "withWorkspaceName": { - resourceModel: &resource.Model{}, - currentModel: &resource.Model{ - Profile: util.StringPtr("default"), - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - validateFunc: func(t *testing.T, rm *resource.Model) { - t.Helper() - assert.Equal(t, "default", util.SafeString(rm.Profile)) - assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) - assert.Equal(t, "processor-1", util.SafeString(rm.ProcessorName)) - assert.Equal(t, "workspace-1", util.SafeString(rm.WorkspaceName)) - assert.Equal(t, "workspace-1", util.SafeString(rm.InstanceName)) - }, - }, - "withInstanceName": { - resourceModel: &resource.Model{}, - currentModel: &resource.Model{ - Profile: util.StringPtr("default"), - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - InstanceName: util.StringPtr("instance-1"), - }, - validateFunc: func(t *testing.T, rm *resource.Model) { - t.Helper() - assert.Equal(t, "default", util.SafeString(rm.Profile)) - assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) - assert.Equal(t, "processor-1", 
util.SafeString(rm.ProcessorName)) - assert.Equal(t, "instance-1", util.SafeString(rm.InstanceName)) - assert.Equal(t, "instance-1", util.SafeString(rm.WorkspaceName)) - }, - }, - "emptyWorkspaceName": { - resourceModel: &resource.Model{}, - currentModel: &resource.Model{ - Profile: util.StringPtr("default"), - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr(""), - }, - validateFunc: func(t *testing.T, rm *resource.Model) { - t.Helper() - assert.Nil(t, rm.WorkspaceName) - }, - }, - "bothNil": { - resourceModel: &resource.Model{}, - currentModel: &resource.Model{ - Profile: util.StringPtr("default"), - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: nil, - InstanceName: nil, - }, - validateFunc: func(t *testing.T, rm *resource.Model) { - t.Helper() - assert.Nil(t, rm.WorkspaceName) - assert.Nil(t, rm.InstanceName) - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - resource.CopyIdentifyingFields(tc.resourceModel, tc.currentModel) - if tc.validateFunc != nil { - tc.validateFunc(t, tc.resourceModel) - } - }) - } -} - -func TestParseTimeout(t *testing.T) { - testCases := map[string]struct { - timeoutStr string - expectedResult time.Duration - }{ - "validDuration": { - timeoutStr: "20m", - expectedResult: 20 * time.Minute, - }, - "validSeconds": { - timeoutStr: "30s", - expectedResult: 30 * time.Second, - }, - "emptyString": { - timeoutStr: "", - expectedResult: resource.DefaultCreateTimeout, - }, - "invalidFormat": { - timeoutStr: "invalid", - expectedResult: resource.DefaultCreateTimeout, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - result := resource.ParseTimeout(tc.timeoutStr) - assert.Equal(t, tc.expectedResult, result) - }) - } -} - -func TestIsTimeoutExceeded(t *testing.T) { - testCases := map[string]struct { - startTimeStr string - 
timeoutDurationStr string - expectedResult bool - }{ - "timeoutExceeded": { - startTimeStr: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), - timeoutDurationStr: "20m", - expectedResult: true, - }, - "timeoutNotExceeded": { - startTimeStr: time.Now().Add(-10 * time.Minute).Format(time.RFC3339), - timeoutDurationStr: "20m", - expectedResult: false, - }, - "emptyStartTime": { - startTimeStr: "", - timeoutDurationStr: "20m", - expectedResult: false, - }, - "emptyTimeoutDuration": { - startTimeStr: time.Now().Format(time.RFC3339), - timeoutDurationStr: "", - expectedResult: false, - }, - "invalidStartTime": { - startTimeStr: "invalid", - timeoutDurationStr: "20m", - expectedResult: false, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - result := resource.IsTimeoutExceeded(tc.startTimeStr, tc.timeoutDurationStr) - assert.Equal(t, tc.expectedResult, result) - }) - } -} - -func TestValidateUpdateStateTransition(t *testing.T) { - testCases := map[string]struct { - currentState string - desiredState string - expectedErrMsg string - expectedIsValid bool - }{ - "validCREATEDtoSTARTED": { - currentState: resource.CreatedState, - desiredState: resource.StartedState, - expectedIsValid: true, - }, - "invalidSTARTEDtoCREATED": { - currentState: resource.StartedState, - desiredState: resource.CreatedState, - expectedIsValid: false, - expectedErrMsg: "cannot transition from STARTED to CREATED", - }, - "validSTARTEDtoSTOPPED": { - currentState: resource.StartedState, - desiredState: resource.StoppedState, - expectedIsValid: true, - }, - "invalidCREATEDtoSTOPPED": { - currentState: resource.CreatedState, - desiredState: resource.StoppedState, - expectedIsValid: false, - expectedErrMsg: "must be in STARTED state", - }, - "sameState": { - currentState: resource.CreatedState, - desiredState: resource.CreatedState, - expectedIsValid: true, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - errMsg, isValid := 
resource.ValidateUpdateStateTransition(tc.currentState, tc.desiredState) - assert.Equal(t, tc.expectedIsValid, isValid) - if !tc.expectedIsValid { - assert.Contains(t, errMsg, tc.expectedErrMsg) - } - }) - } -} - -func TestHandleError(t *testing.T) { - testCases := map[string]struct { - response *http.Response - method constants.CfnFunctions - err error - expectedStatus handler.Status - expectedErrCode string - expectedMsg string - }{ - "conflictError": { - response: &http.Response{ - StatusCode: http.StatusConflict, - }, - method: constants.CREATE, - err: errors.New("already exists"), - expectedStatus: handler.Failed, - expectedErrCode: "AlreadyExists", - expectedMsg: "CREATE error:already exists", - }, - "genericError": { - response: &http.Response{ - StatusCode: http.StatusBadRequest, - }, - method: constants.UPDATE, - err: errors.New("bad request"), - expectedStatus: handler.Failed, - expectedMsg: "UPDATE error:bad request", - }, - "nilResponse": { - response: nil, - method: constants.DELETE, - err: errors.New("network error"), - expectedStatus: handler.Failed, - expectedMsg: "DELETE error:network error", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - event, err := resource.HandleError(tc.response, tc.method, tc.err) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - assert.Contains(t, event.Message, tc.expectedMsg) - if tc.expectedErrCode != "" { - assert.Equal(t, tc.expectedErrCode, event.HandlerErrorCode) - } - }) - } -} - -func TestFinalizeModel(t *testing.T) { - testCases := map[string]struct { - streamProcessor *admin.StreamsProcessorWithStats - currentModel *resource.Model - validateFunc func(t *testing.T, event handler.ProgressEvent) - message string - expectedStatus handler.Status - expectedMsg string - }{ - "success": { - streamProcessor: &admin.StreamsProcessorWithStats{ - Name: "processor-1", - Id: "507f1f77bcf86cd799439011", - State: resource.CreatedState, - }, - 
currentModel: &resource.Model{ - Profile: util.StringPtr("default"), - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - message: "Create Complete", - expectedStatus: handler.Success, - expectedMsg: "Create Complete", - validateFunc: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - model, ok := event.ResourceModel.(*resource.Model) - require.True(t, ok, "ResourceModel should be *resource.Model") - assert.Equal(t, "processor-1", util.SafeString(model.ProcessorName)) - }, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - event, err := resource.FinalizeModel(tc.streamProcessor, tc.currentModel, tc.message) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - assert.Equal(t, tc.expectedMsg, event.Message) - if tc.validateFunc != nil { - tc.validateFunc(t, event) - } - }) - } -} diff --git a/cfn-resources/stream-processor/cmd/resource/mappings.go b/cfn-resources/stream-processor/cmd/resource/mappings.go index 6ddf8eaf8..ec4853fa0 100644 --- a/cfn-resources/stream-processor/cmd/resource/mappings.go +++ b/cfn-resources/stream-processor/cmd/resource/mappings.go @@ -18,7 +18,7 @@ import ( "encoding/json" "fmt" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" ) diff --git a/cfn-resources/stream-processor/cmd/resource/mappings_test.go b/cfn-resources/stream-processor/cmd/resource/mappings_test.go index e7dc7e745..ca4e5e7f4 100644 --- a/cfn-resources/stream-processor/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-processor/cmd/resource/mappings_test.go @@ -22,7 +22,7 @@ import ( "github.com/mongodb/mongodbatlas-cloudformation-resources/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + 
"go.mongodb.org/atlas-sdk/v20250312012/admin" ) func assertJSONEqual(t *testing.T, expected, actual string) { diff --git a/cfn-resources/stream-processor/cmd/resource/resource.go b/cfn-resources/stream-processor/cmd/resource/resource.go index 43fe9cdfd..ba0c0f195 100644 --- a/cfn-resources/stream-processor/cmd/resource/resource.go +++ b/cfn-resources/stream-processor/cmd/resource/resource.go @@ -20,7 +20,7 @@ import ( "net/http" "time" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" diff --git a/cfn-resources/stream-processor/cmd/resource/resource_test.go b/cfn-resources/stream-processor/cmd/resource/resource_test.go deleted file mode 100644 index 1759f294b..000000000 --- a/cfn-resources/stream-processor/cmd/resource/resource_test.go +++ /dev/null @@ -1,513 +0,0 @@ -// Copyright 2026 MongoDB Inc -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource_test - -import ( - "fmt" - "net/http" - "testing" - "time" - - "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" - "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312010/admin" - "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" -) - -var ( - baseResourceModel = &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr("workspace-1"), - Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), - } - validProcessor = &admin.StreamsProcessorWithStats{ - Name: "processor-1", - Id: "507f1f77bcf86cd799439011", - State: resource.CreatedState, - } -) - -func TestList(t *testing.T) { - originalInitEnv := resource.InitEnvWithLatestClient - defer func() { - resource.InitEnvWithLatestClient = originalInitEnv - }() - - testCases := map[string]struct { - currentModel *resource.Model - mockSetup func(*mockadmin.StreamsApi) - expectedStatus handler.Status - expectedCount int - }{ - "successfulListSinglePage": { - currentModel: &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorsApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) - processors := &admin.PaginatedApiStreamsStreamProcessorWithStats{ - Results: &[]admin.StreamsProcessorWithStats{ - {Name: "processor-1", Id: "507f1f77bcf86cd799439011", State: resource.CreatedState}, - {Name: "processor-2", Id: "507f1f77bcf86cd799439012", State: resource.StartedState}, - }, - TotalCount: util.Pointer(2), - } - 
m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - expectedCount: 2, - }, - "listWithApiError": { - currentModel: &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorsApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("server error")) - }, - expectedStatus: handler.Failed, - expectedCount: 0, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - mockStreamsAPI := mockadmin.NewStreamsApi(t) - tc.mockSetup(mockStreamsAPI) - - mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} - resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { - return mockClient, nil - } - - event, err := resource.List(handler.Request{RequestContext: handler.RequestContext{}}, nil, tc.currentModel) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - - if tc.expectedStatus == handler.Success { - require.NotNil(t, event.ResourceModels) - assert.Len(t, event.ResourceModels, tc.expectedCount) - } - }) - } -} - -func TestSetup(t *testing.T) { - assert.NotPanics(t, func() { - resource.Setup() - }) -} - -func TestValidationErrors(t *testing.T) { - validationModels := map[string]*resource.Model{ - "missingProjectId": { - ProcessorName: util.StringPtr("processor-1"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - "missingProcessorName": { - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - WorkspaceName: util.StringPtr("workspace-1"), - }, - } - - operations := map[string]func(handler.Request, 
*resource.Model, *resource.Model) (handler.ProgressEvent, error){ - "Create": resource.Create, - "Read": resource.Read, - "Update": resource.Update, - "Delete": resource.Delete, - } - - for opName, operation := range operations { - for modelName, model := range validationModels { - t.Run(opName+"_"+modelName, func(t *testing.T) { - event, err := operation(handler.Request{RequestContext: handler.RequestContext{}}, nil, model) - require.NoError(t, err) - assert.Equal(t, handler.Failed, event.OperationStatus) - assert.Contains(t, event.Message, "required") - }) - } - } -} - -func setupMockClient(t *testing.T, mockSetup func(*mockadmin.StreamsApi)) func() { - t.Helper() - originalInitEnv := resource.InitEnvWithLatestClient - mockStreamsAPI := mockadmin.NewStreamsApi(t) - mockSetup(mockStreamsAPI) - - mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} - resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { - return mockClient, nil - } - - return func() { resource.InitEnvWithLatestClient = originalInitEnv } -} - -func TestCRUDOperations(t *testing.T) { - testCases := map[string]struct { - operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) - prevModel *resource.Model - currentModel *resource.Model - mockSetup func(*mockadmin.StreamsApi) - validateResult func(t *testing.T, event handler.ProgressEvent) - expectedStatus handler.Status - req handler.Request - }{ - "Create_invalidState": { - operation: resource.Create, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = util.StringPtr("INVALID_STATE") - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) {}, - expectedStatus: handler.Failed, - }, - "Create_apiError": { - operation: resource.Create, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := 
admin.CreateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("API error")) - }, - expectedStatus: handler.Failed, - }, - "Create_withCallback": { - operation: resource.Create, - req: handler.Request{ - CallbackContext: map[string]any{ - "callbackStreamProcessor": true, - "projectID": "507f1f77bcf86cd799439011", - "workspaceName": "workspace-1", - "processorName": "processor-1", - "needsStarting": false, - "startTime": time.Now().Format(time.RFC3339), - "timeoutDuration": "20m", - "deleteOnCreateTimeout": false, - }, - }, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Create_withDesiredStateStarted": { - operation: resource.Create, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = util.StringPtr(resource.StartedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.CreateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.InProgress, - }, - "Create_withTimeouts": { - operation: resource.Create, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.Timeouts = &resource.Timeouts{Create: util.StringPtr("30m")} - m.DeleteOnCreateTimeout = util.Pointer(false) - return &m - }(), - mockSetup: func(m 
*mockadmin.StreamsApi) { - req := admin.CreateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.InProgress, - }, - "Create_invalidPipeline": { - operation: resource.Create, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.Pipeline = util.StringPtr("invalid json") - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) {}, - expectedStatus: handler.Failed, - }, - "Create_missingWorkspaceAndInstance": { - operation: resource.Create, - currentModel: &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), - }, - mockSetup: func(m *mockadmin.StreamsApi) {}, - expectedStatus: handler.Failed, - }, - "Read_success": { - operation: resource.Read, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Read_notFound": { - operation: resource.Read, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) - }, - expectedStatus: handler.Failed, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, "NotFound", event.HandlerErrorCode) - }, 
- }, - "Read_apiError": { - operation: resource.Read, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) - }, - expectedStatus: handler.Failed, - }, - "Read_missingWorkspaceAndInstance": { - operation: resource.Read, - currentModel: &resource.Model{ - ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), - ProcessorName: util.StringPtr("processor-1"), - }, - mockSetup: func(m *mockadmin.StreamsApi) {}, - expectedStatus: handler.Failed, - }, - "Update_withCallback": { - operation: resource.Update, - req: handler.Request{ - CallbackContext: map[string]any{ - "callbackStreamProcessor": true, - "projectID": "507f1f77bcf86cd799439011", - "workspaceName": "workspace-1", - "processorName": "processor-1", - "desiredState": resource.CreatedState, - }, - }, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Update_notFound": { - operation: resource.Update, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, 
mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) - }, - expectedStatus: handler.Failed, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, "NotFound", event.HandlerErrorCode) - }, - }, - "Update_invalidStateTransition": { - operation: resource.Update, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = util.StringPtr(resource.CreatedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Failed, - }, - "Update_stopError": { - operation: resource.Update, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = util.StringPtr(resource.StoppedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) - stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) - m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("stop failed")) - }, - expectedStatus: handler.Failed, - }, - "Update_stopSuccess": { - operation: resource.Update, - currentModel: func() *resource.Model { - m := *baseResourceModel - 
m.DesiredState = util.StringPtr(resource.StoppedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - processor := &admin.StreamsProcessorWithStats{Name: "processor-1", State: resource.StartedState} - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) - stopReq := admin.StopStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) - m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.InProgress, - }, - "Update_startError": { - operation: resource.Update, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = util.StringPtr(resource.StartedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - startReq := admin.StartStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) - m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("start failed")) - }, - expectedStatus: handler.Failed, - }, - "Update_startSuccess": { - operation: resource.Update, - currentModel: func() *resource.Model { - m := *baseResourceModel - m.DesiredState = 
util.StringPtr(resource.StartedState) - return &m - }(), - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - startReq := admin.StartStreamProcessorApiRequest{ApiService: m} - m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) - m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) - }, - expectedStatus: handler.InProgress, - }, - "Update_successWithoutStarting": { - operation: resource.Update, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(validProcessor, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Update_apiError": { - operation: resource.Update, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.GetStreamProcessorApiRequest{ApiService: m} - m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) - m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(validProcessor, 
&http.Response{StatusCode: 200}, nil) - updateReq := admin.UpdateStreamProcessorApiRequest{ApiService: m} - m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) - m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("update failed")) - }, - expectedStatus: handler.Failed, - }, - "Delete_success": { - operation: resource.Delete, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.DeleteStreamProcessorApiRequest{ApiService: m} - m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Delete_notFound": { - operation: resource.Delete, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.DeleteStreamProcessorApiRequest{ApiService: m} - m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(&http.Response{StatusCode: 404}, fmt.Errorf("not found")) - }, - expectedStatus: handler.Failed, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, "NotFound", event.HandlerErrorCode) - }, - }, - "Delete_apiError": { - operation: resource.Delete, - currentModel: baseResourceModel, - mockSetup: func(m *mockadmin.StreamsApi) { - req := admin.DeleteStreamProcessorApiRequest{ApiService: m} - m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) - m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("delete failed")) - }, - expectedStatus: handler.Failed, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - cleanup := setupMockClient(t, tc.mockSetup) - 
defer cleanup() - - event, err := tc.operation(tc.req, tc.prevModel, tc.currentModel) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - if tc.validateResult != nil { - tc.validateResult(t, event) - } - }) - } -} From 64d662a76a597e9e82b93e1c9d459ce2d13f9ca4 Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Wed, 14 Jan 2026 16:10:53 +0530 Subject: [PATCH 03/10] refactor: extract CRUD handlers and make internal functions private in stream processor --- .../cmd/resource/callbacks.go | 82 ++-- .../stream-processor/cmd/resource/helpers.go | 26 +- .../stream-processor/cmd/resource/resource.go | 426 ++---------------- .../stream-processor/cmd/resource/share.go | 380 ++++++++++++++++ 4 files changed, 471 insertions(+), 443 deletions(-) create mode 100644 cfn-resources/stream-processor/cmd/resource/share.go diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go b/cfn-resources/stream-processor/cmd/resource/callbacks.go index 198386de8..fb4cedc42 100644 --- a/cfn-resources/stream-processor/cmd/resource/callbacks.go +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -23,6 +23,7 @@ import ( "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/logger" ) @@ -38,12 +39,7 @@ type CallbackData struct { DeleteOnCreateTimeout bool } -func IsCallback(req *handler.Request) bool { - _, found := req.CallbackContext["callbackStreamProcessor"] - return found -} - -func GetCallbackData(req handler.Request) *CallbackData { +func getCallbackData(req handler.Request) *CallbackData { ctx := &CallbackData{} if val, ok := req.CallbackContext["projectID"].(string); ok { @@ -74,7 +70,7 @@ func GetCallbackData(req handler.Request) *CallbackData { return ctx } -func ValidateCallbackData(ctx *CallbackData) 
*handler.ProgressEvent { +func validateCallbackData(ctx *CallbackData) *handler.ProgressEvent { if ctx.ProjectID == "" || ctx.WorkspaceOrInstanceName == "" || ctx.ProcessorName == "" { return &handler.ProgressEvent{ OperationStatus: handler.Failed, @@ -84,7 +80,7 @@ func ValidateCallbackData(ctx *CallbackData) *handler.ProgressEvent { return nil } -func BuildCallbackContext(projectID, workspaceOrInstanceName, processorName string, additionalFields map[string]any) map[string]any { +func buildCallbackContext(projectID, workspaceOrInstanceName, processorName string, additionalFields map[string]any) map[string]any { ctx := map[string]any{ "callbackStreamProcessor": true, "projectID": projectID, @@ -97,27 +93,27 @@ func BuildCallbackContext(projectID, workspaceOrInstanceName, processorName stri return ctx } -func cleanupOnCreateTimeout(ctx context.Context, atlasClient *admin.APIClient, callbackCtx *CallbackData) error { +func cleanupOnCreateTimeout(ctx context.Context, client *util.MongoDBClient, callbackCtx *CallbackData) error { if !callbackCtx.DeleteOnCreateTimeout { return nil } - _, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName).Execute() + _, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName).Execute() if err != nil { _, _ = logger.Warnf("Cleanup delete failed: %v", err) } return nil } -func HandleCreateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) { +func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, currentModel *Model, callbackCtx *CallbackData) handler.ProgressEvent { needsStarting := callbackCtx.NeedsStarting - if IsTimeoutExceeded(callbackCtx.StartTime, callbackCtx.TimeoutDuration) { - if err := cleanupOnCreateTimeout(context.Background(), atlasClient, 
callbackCtx); err != nil { + if isTimeoutExceeded(callbackCtx.StartTime, callbackCtx.TimeoutDuration) { + if err := cleanupOnCreateTimeout(context.Background(), client, callbackCtx); err != nil { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: fmt.Sprintf("Timeout reached and cleanup failed: %s", err.Error()), - }, nil + } } cleanupMsg := "Timeout reached when waiting for stream processor creation" if callbackCtx.DeleteOnCreateTimeout { @@ -128,17 +124,17 @@ func HandleCreateCallback(ctx context.Context, atlasClient *admin.APIClient, cur return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: cleanupMsg, - }, nil + } } - streamProcessor, peErr := getStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) if peErr != nil { - return *peErr, nil + return *peErr } currentState := streamProcessor.GetState() - callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ "needsStarting": callbackCtx.NeedsStarting, "startTime": callbackCtx.StartTime, "timeoutDuration": callbackCtx.TimeoutDuration, @@ -148,37 +144,37 @@ func HandleCreateCallback(ctx context.Context, atlasClient *admin.APIClient, cur switch currentState { case CreatedState: if needsStarting { - if peErr := startStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { - return *peErr, nil + if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + 
return *peErr } - return createInProgressEvent("Starting stream processor", currentModel, callbackContext), nil + return createInProgressEvent("Starting stream processor", currentModel, callbackContext) } - return FinalizeModel(streamProcessor, currentModel, "Create Completed") + return finalizeModel(streamProcessor, currentModel, "Create Completed") case StartedState: - return FinalizeModel(streamProcessor, currentModel, "Create Completed") + return finalizeModel(streamProcessor, currentModel, "Create Completed") case InitiatingState, CreatingState: - return createInProgressEvent(fmt.Sprintf("Creating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil + return createInProgressEvent(fmt.Sprintf("Creating stream processor (current state: %s)", currentState), currentModel, callbackContext) case FailedState: return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Stream processor entered FAILED state", - }, nil + } default: return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: fmt.Sprintf("Unexpected state during creation: %s", currentState), - }, nil + } } } -func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) { - streamProcessor, peErr := getStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) +func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, currentModel *Model, callbackCtx *CallbackData) handler.ProgressEvent { + streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) if peErr != nil { - return *peErr, nil + return *peErr } desiredState := callbackCtx.DesiredState @@ -195,7 +191,7 @@ func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, cur currentState := 
streamProcessor.GetState() - callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ "desiredState": desiredState, }) @@ -206,29 +202,29 @@ func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, cur return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: fmt.Sprintf("Error creating update request: %s", err.Error()), - }, nil + } } - streamProcessorResp, apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + streamProcessorResp, apiResp, err := client.AtlasSDK.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() if err != nil { - return HandleError(apiResp, constants.UPDATE, err) + return handleError(apiResp, constants.UPDATE, err) } if desiredState == StartedState { - if peErr := startStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { - return *peErr, nil + if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + return *peErr } - return createInProgressEvent("Starting stream processor", currentModel, callbackContext), nil + return createInProgressEvent("Starting stream processor", currentModel, callbackContext) } - return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") + return finalizeModel(streamProcessorResp, currentModel, "Update Completed") case StartedState: if desiredState == StartedState { - return FinalizeModel(streamProcessor, currentModel, "Update Completed") + return finalizeModel(streamProcessor, currentModel, "Update Completed") } - _, err := 
atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, + _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, &admin.StopStreamProcessorApiParams{ GroupId: callbackCtx.ProjectID, TenantName: callbackCtx.WorkspaceOrInstanceName, @@ -239,17 +235,17 @@ func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, cur return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), - }, nil + } } - return createInProgressEvent("Stopping stream processor", currentModel, callbackContext), nil + return createInProgressEvent("Stopping stream processor", currentModel, callbackContext) case FailedState: return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Stream processor entered FAILED state", - }, nil + } default: - return createInProgressEvent(fmt.Sprintf("Updating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil + return createInProgressEvent(fmt.Sprintf("Updating stream processor (current state: %s)", currentState), currentModel, callbackContext) } } diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go index 1576107b7..a737112ff 100644 --- a/cfn-resources/stream-processor/cmd/resource/helpers.go +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -30,7 +30,7 @@ import ( "github.com/mongodb/mongodbatlas-cloudformation-resources/util/progressevent" ) -func CopyIdentifyingFields(resourceModel, currentModel *Model) { +func copyIdentifyingFields(resourceModel, currentModel *Model) { resourceModel.Profile = currentModel.Profile resourceModel.ProjectId = currentModel.ProjectId resourceModel.ProcessorName = currentModel.ProcessorName @@ -48,7 +48,7 @@ func CopyIdentifyingFields(resourceModel, currentModel *Model) { } } -func ParseTimeout(timeoutStr string) time.Duration { +func parseTimeout(timeoutStr string) time.Duration { if 
timeoutStr == "" { return DefaultCreateTimeout } @@ -60,7 +60,7 @@ func ParseTimeout(timeoutStr string) time.Duration { return duration } -func IsTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool { +func isTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool { if startTimeStr == "" || timeoutDurationStr == "" { return false } @@ -71,28 +71,28 @@ func IsTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool { return false } - timeoutDuration := ParseTimeout(timeoutDurationStr) + timeoutDuration := parseTimeout(timeoutDurationStr) elapsed := time.Since(startTime) return elapsed >= timeoutDuration } -func FinalizeModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model, message string) (handler.ProgressEvent, error) { +func finalizeModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model, message string) handler.ProgressEvent { resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel) if err != nil { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), - }, nil + } } - CopyIdentifyingFields(resourceModel, currentModel) + copyIdentifyingFields(resourceModel, currentModel) return handler.ProgressEvent{ OperationStatus: handler.Success, Message: message, ResourceModel: resourceModel, - }, nil + } } func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsProcessorWithStats, *http.Response, error) { @@ -169,7 +169,7 @@ func createInProgressEvent(message string, currentModel *Model, callbackContext *inProgressModel = *currentModel inProgressModel.DeleteOnCreateTimeout = nil } - CopyIdentifyingFields(inProgressModel, currentModel) + copyIdentifyingFields(inProgressModel, currentModel) return handler.ProgressEvent{ OperationStatus: handler.InProgress, @@ -180,7 +180,7 @@ func createInProgressEvent(message string, 
currentModel *Model, callbackContext } } -func ValidateUpdateStateTransition(currentState, desiredState string) (errMsg string, isValidTransition bool) { +func validateUpdateStateTransition(currentState, desiredState string) (errMsg string, isValidTransition bool) { if currentState == desiredState { return "", true } @@ -196,7 +196,7 @@ func ValidateUpdateStateTransition(currentState, desiredState string) (errMsg st return "", true } -func HandleError(response *http.Response, method constants.CfnFunctions, err error) (handler.ProgressEvent, error) { +func handleError(response *http.Response, method constants.CfnFunctions, err error) handler.ProgressEvent { errMsg := fmt.Sprintf("%s error:%s", method, err.Error()) if response != nil && response.StatusCode == http.StatusConflict { @@ -204,8 +204,8 @@ func HandleError(response *http.Response, method constants.CfnFunctions, err err OperationStatus: handler.Failed, Message: errMsg, HandlerErrorCode: "AlreadyExists", - }, nil + } } - return progressevent.GetFailedEventByResponse(errMsg, response), nil + return progressevent.GetFailedEventByResponse(errMsg, response) } diff --git a/cfn-resources/stream-processor/cmd/resource/resource.go b/cfn-resources/stream-processor/cmd/resource/resource.go index ba0c0f195..b8d9d7020 100644 --- a/cfn-resources/stream-processor/cmd/resource/resource.go +++ b/cfn-resources/stream-processor/cmd/resource/resource.go @@ -15,15 +15,9 @@ package resource import ( - "context" - "fmt" - "net/http" "time" - "go.mongodb.org/atlas-sdk/v20250312012/admin" - "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/validator" @@ -44,403 +38,61 @@ const ( DefaultCreateTimeout = 20 * time.Minute ) -func Setup() { - util.SetupLogger("mongodb-atlas-stream-processor") -} - -var 
CreateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} -var ReadRequiredFields = []string{constants.ProjectID, constants.ProcessorName} -var UpdateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} -var DeleteRequiredFields = []string{constants.ProjectID, constants.ProcessorName} -var ListRequiredFields = []string{constants.ProjectID} - -var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { - Setup() - util.SetDefaultProfileIfNotDefined(¤tModel.Profile) - - if errEvent := validator.ValidateModel(requiredFields, currentModel); errEvent != nil { - return nil, errEvent - } +var ( + createRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} + readUpdateDeleteRequiredFields = []string{constants.ProjectID, constants.ProcessorName} + listRequiredFields = []string{constants.ProjectID} +) - client, peErr := util.NewAtlasClient(&req, currentModel.Profile) - if peErr != nil { - return nil, peErr +func Create(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { + client, setupErr := setupRequest(req, model, createRequiredFields) + if setupErr != nil { + return *setupErr, nil } - return client.AtlasSDK, nil + return HandleCreate(&req, client, model), nil } -// Create handles the Create event from the Cloudformation service. 
-func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, CreateRequiredFields) - if peErr != nil { - return *peErr, nil - } - - if IsCallback(&req) { - callbackCtx := GetCallbackData(req) - if peErr := ValidateCallbackData(callbackCtx); peErr != nil { - return *peErr, nil - } - return HandleCreateCallback( - context.Background(), - atlasClient, - currentModel, - callbackCtx, - ) - } - - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - }, nil - } - - ctx := context.Background() - projectID := util.SafeString(currentModel.ProjectId) - processorName := util.SafeString(currentModel.ProcessorName) - - var needsStarting bool - if currentModel.DesiredState != nil { - state := *currentModel.DesiredState - switch state { - case StartedState: - needsStarting = true - case CreatedState: - needsStarting = false - default: - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: "When creating a stream processor, the only valid states are CREATED and STARTED", - }, nil - } - } - - streamProcessorReq, err := NewStreamProcessorReq(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error creating stream processor request: %s", err.Error()), - }, nil - } - - _, apiResp, err := atlasClient.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceOrInstanceName, streamProcessorReq).Execute() - if err != nil { - return HandleError(apiResp, constants.CREATE, err) - } - - timeoutStr := "" - if currentModel.Timeouts != nil && currentModel.Timeouts.Create != nil { - timeoutStr = *currentModel.Timeouts.Create +func Read(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { + client, setupErr := setupRequest(req, model, 
readUpdateDeleteRequiredFields) + if setupErr != nil { + return *setupErr, nil } - - deleteOnCreateTimeout := true - if currentModel.DeleteOnCreateTimeout != nil { - deleteOnCreateTimeout = *currentModel.DeleteOnCreateTimeout - } - - inProgressModel := &Model{} - if currentModel != nil { - *inProgressModel = *currentModel - inProgressModel.DeleteOnCreateTimeout = nil - } - CopyIdentifyingFields(inProgressModel, currentModel) - - return handler.ProgressEvent{ - OperationStatus: handler.InProgress, - Message: "Creating stream processor", - ResourceModel: inProgressModel, - CallbackDelaySeconds: defaultCallbackDelaySeconds, - CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ - "needsStarting": needsStarting, - "startTime": time.Now().Format(time.RFC3339), - "timeoutDuration": timeoutStr, - "deleteOnCreateTimeout": deleteOnCreateTimeout, - }), - }, nil + return HandleRead(&req, client, model), nil } -// Read handles the Read event from the Cloudformation service. 
-func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ReadRequiredFields) - if peErr != nil { - return *peErr, nil - } - - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - }, nil +func Update(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { + client, setupErr := setupRequest(req, model, readUpdateDeleteRequiredFields) + if setupErr != nil { + return *setupErr, nil } - - projectID := util.SafeString(currentModel.ProjectId) - processorName := util.SafeString(currentModel.ProcessorName) - - streamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(context.Background(), - &admin.GetStreamProcessorApiParams{ - GroupId: projectID, - TenantName: workspaceOrInstanceName, - ProcessorName: processorName, - }).Execute() - if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: "Resource not found", - HandlerErrorCode: "NotFound", - }, nil - } - return HandleError(apiResp, constants.READ, err) - } - - resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), - }, nil - } - - CopyIdentifyingFields(resourceModel, currentModel) - - return handler.ProgressEvent{ - OperationStatus: handler.Success, - Message: "Read Completed", - ResourceModel: resourceModel, - }, nil + return HandleUpdate(&req, client, prevModel, model), nil } -// Update handles the Update event from the Cloudformation service. 
-func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, UpdateRequiredFields) - if peErr != nil { - return *peErr, nil +func Delete(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { + client, setupErr := setupRequest(req, model, readUpdateDeleteRequiredFields) + if setupErr != nil { + return *setupErr, nil } - - if IsCallback(&req) { - callbackCtx := GetCallbackData(req) - if peErr := ValidateCallbackData(callbackCtx); peErr != nil { - return *peErr, nil - } - return HandleUpdateCallback( - context.Background(), - atlasClient, - currentModel, - callbackCtx, - ) - } - - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - }, nil - } - - ctx := context.Background() - projectID := util.SafeString(currentModel.ProjectId) - processorName := util.SafeString(currentModel.ProcessorName) - - requestParams := &admin.GetStreamProcessorApiParams{ - GroupId: projectID, - TenantName: workspaceOrInstanceName, - ProcessorName: processorName, - } - - currentStreamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() - if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: "Resource not found", - HandlerErrorCode: "NotFound", - }, nil - } - return HandleError(apiResp, constants.READ, err) - } - - currentState := currentStreamProcessor.GetState() - - desiredState := currentState - if currentModel.DesiredState != nil && *currentModel.DesiredState != "" { - desiredState = *currentModel.DesiredState - } else if prevModel != nil && prevModel.DesiredState != nil && *prevModel.DesiredState != "" { - desiredState = *prevModel.DesiredState - } - - if 
errMsg, isValid := ValidateUpdateStateTransition(currentState, desiredState); !isValid { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: errMsg, - }, nil - } - - if currentState == StartedState { - _, err := atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, - &admin.StopStreamProcessorApiParams{ - GroupId: projectID, - TenantName: workspaceOrInstanceName, - ProcessorName: processorName, - }, - ).Execute() - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), - }, nil - } - - inProgressModel := &Model{} - if currentModel != nil { - *inProgressModel = *currentModel - inProgressModel.DeleteOnCreateTimeout = nil - } - CopyIdentifyingFields(inProgressModel, currentModel) - - return handler.ProgressEvent{ - OperationStatus: handler.InProgress, - Message: "Stopping stream processor", - ResourceModel: inProgressModel, - CallbackDelaySeconds: defaultCallbackDelaySeconds, - CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ - "desiredState": desiredState, - }), - }, nil - } - - modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error creating update request: %s", err.Error()), - }, nil - } - - streamProcessorResp, apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() - if err != nil { - return HandleError(apiResp, constants.UPDATE, err) - } - - if desiredState == StartedState { - _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, - &admin.StartStreamProcessorApiParams{ - GroupId: projectID, - TenantName: workspaceOrInstanceName, - ProcessorName: processorName, - }, - ).Execute() - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: 
fmt.Sprintf("Error starting stream processor: %s", err.Error()), - }, nil - } - - inProgressModel := &Model{} - if currentModel != nil { - *inProgressModel = *currentModel - inProgressModel.DeleteOnCreateTimeout = nil - } - CopyIdentifyingFields(inProgressModel, currentModel) - - return handler.ProgressEvent{ - OperationStatus: handler.InProgress, - Message: "Starting stream processor", - ResourceModel: inProgressModel, - CallbackDelaySeconds: defaultCallbackDelaySeconds, - CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ - "desiredState": desiredState, - }), - }, nil - } - - return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") + return HandleDelete(&req, client, model), nil } -// List handles the List event from the Cloudformation service. -func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ListRequiredFields) - if peErr != nil { - return *peErr, nil - } - - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - }, nil - } - - ctx := context.Background() - projectID := util.SafeString(currentModel.ProjectId) - - accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, atlasClient, projectID, workspaceOrInstanceName) - if err != nil { - return HandleError(apiResp, constants.LIST, err) - } - - response := make([]interface{}, 0, len(accumulatedProcessors)) - for i := range accumulatedProcessors { - model, err := GetStreamProcessorModel(&accumulatedProcessors[i], currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), - }, nil - } - - CopyIdentifyingFields(model, currentModel) - response = append(response, model) 
+func List(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { + client, setupErr := setupRequest(req, model, listRequiredFields) + if setupErr != nil { + return *setupErr, nil } - - return handler.ProgressEvent{ - OperationStatus: handler.Success, - Message: "List Completed", - ResourceModels: response, - }, nil + return HandleList(&req, client, model), nil } -// Delete handles the Delete event from the CloudFormation service. -func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, DeleteRequiredFields) - if peErr != nil { - return *peErr, nil - } - - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - }, nil +func setupRequest(req handler.Request, model *Model, requiredFields []string) (*util.MongoDBClient, *handler.ProgressEvent) { + util.SetupLogger("mongodb-atlas-stream-processor") + if modelValidation := validator.ValidateModel(requiredFields, model); modelValidation != nil { + return nil, modelValidation } - - ctx := context.Background() - projectID := util.SafeString(currentModel.ProjectId) - processorName := util.SafeString(currentModel.ProcessorName) - - apiResp, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceOrInstanceName, processorName).Execute() - if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: "Resource not found", - HandlerErrorCode: "NotFound", - }, nil - } - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error deleting stream processor: %s", err.Error()), - }, nil + util.SetDefaultProfileIfNotDefined(&model.Profile) + client, peErr := util.NewAtlasClient(&req, model.Profile) + if peErr != 
nil { + return nil, peErr } - - return handler.ProgressEvent{ - OperationStatus: handler.Success, - Message: "Delete Completed", - }, nil + return client, nil } diff --git a/cfn-resources/stream-processor/cmd/resource/share.go b/cfn-resources/stream-processor/cmd/resource/share.go new file mode 100644 index 000000000..121a1ef8a --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/share.go @@ -0,0 +1,380 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +import ( + "context" + "fmt" + "net/http" + "time" + + "go.mongodb.org/atlas-sdk/v20250312012/admin" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" +) + +func isCallback(req *handler.Request) bool { + _, found := req.CallbackContext["callbackStreamProcessor"] + return found +} + +func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { + if isCallback(req) { + callbackCtx := getCallbackData(*req) + if peErr := validateCallbackData(callbackCtx); peErr != nil { + return *peErr + } + return handleCreateCallback( + context.Background(), + client, + model, + callbackCtx, + ) + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + } + } + + ctx := context.Background() + projectID := util.SafeString(model.ProjectId) + processorName := util.SafeString(model.ProcessorName) + + var needsStarting bool + if model.DesiredState != nil { + state := *model.DesiredState + switch state { + case StartedState: + needsStarting = true + case CreatedState: + needsStarting = false + default: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "When creating a stream processor, the only valid states are CREATED and STARTED", + } + } + } + + streamProcessorReq, err := NewStreamProcessorReq(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating stream processor request: %s", err.Error()), + } + } + + _, apiResp, err := client.AtlasSDK.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceOrInstanceName, streamProcessorReq).Execute() + if err != nil { + return handleError(apiResp, constants.CREATE, err) + } + + 
timeoutStr := "" + if model.Timeouts != nil && model.Timeouts.Create != nil { + timeoutStr = *model.Timeouts.Create + } + + deleteOnCreateTimeout := true + if model.DeleteOnCreateTimeout != nil { + deleteOnCreateTimeout = *model.DeleteOnCreateTimeout + } + + inProgressModel := &Model{} + if model != nil { + *inProgressModel = *model + inProgressModel.DeleteOnCreateTimeout = nil + } + copyIdentifyingFields(inProgressModel, model) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Creating stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "needsStarting": needsStarting, + "startTime": time.Now().Format(time.RFC3339), + "timeoutDuration": timeoutStr, + "deleteOnCreateTimeout": deleteOnCreateTimeout, + }), + } +} + +func HandleRead(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + } + } + + projectID := util.SafeString(model.ProjectId) + processorName := util.SafeString(model.ProcessorName) + + streamProcessor, apiResp, err := client.AtlasSDK.StreamsApi.GetStreamProcessorWithParams(context.Background(), + &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + } + } + return handleError(apiResp, constants.READ, err) + } + + resourceModel, err := GetStreamProcessorModel(streamProcessor, model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: 
handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + } + } + + copyIdentifyingFields(resourceModel, model) + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Read Completed", + ResourceModel: resourceModel, + } +} + +func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *Model, model *Model) handler.ProgressEvent { + if isCallback(req) { + callbackCtx := getCallbackData(*req) + if peErr := validateCallbackData(callbackCtx); peErr != nil { + return *peErr + } + return handleUpdateCallback( + context.Background(), + client, + model, + callbackCtx, + ) + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + } + } + + ctx := context.Background() + projectID := util.SafeString(model.ProjectId) + processorName := util.SafeString(model.ProcessorName) + + requestParams := &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + } + + currentStreamProcessor, apiResp, err := client.AtlasSDK.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + } + } + return handleError(apiResp, constants.READ, err) + } + + currentState := currentStreamProcessor.GetState() + + desiredState := currentState + if model.DesiredState != nil && *model.DesiredState != "" { + desiredState = *model.DesiredState + } else if prevModel != nil && prevModel.DesiredState != nil && *prevModel.DesiredState != "" { + desiredState = *prevModel.DesiredState + } + + if errMsg, isValid := validateUpdateStateTransition(currentState, desiredState); !isValid { + return 
handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: errMsg, + } + } + + if currentState == StartedState { + _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, + &admin.StopStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), + } + } + + inProgressModel := &Model{} + if model != nil { + *inProgressModel = *model + inProgressModel.DeleteOnCreateTimeout = nil + } + copyIdentifyingFields(inProgressModel, model) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Stopping stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + } + } + + modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating update request: %s", err.Error()), + } + } + + streamProcessorResp, apiResp, err := client.AtlasSDK.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + if err != nil { + return handleError(apiResp, constants.UPDATE, err) + } + + if desiredState == StartedState { + _, err := client.AtlasSDK.StreamsApi.StartStreamProcessorWithParams(ctx, + &admin.StartStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), + } + } + + inProgressModel := &Model{} + if model != nil { + 
*inProgressModel = *model + inProgressModel.DeleteOnCreateTimeout = nil + } + copyIdentifyingFields(inProgressModel, model) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Starting stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: defaultCallbackDelaySeconds, + CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + } + } + + return finalizeModel(streamProcessorResp, model, "Update Completed") +} + +func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + } + } + + ctx := context.Background() + projectID := util.SafeString(model.ProjectId) + processorName := util.SafeString(model.ProcessorName) + + apiResp, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceOrInstanceName, processorName).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + } + } + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error deleting stream processor: %s", err.Error()), + } + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Delete Completed", + } +} + +func HandleList(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + } + } + + ctx := context.Background() + projectID := util.SafeString(model.ProjectId) + + 
accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, client.AtlasSDK, projectID, workspaceOrInstanceName) + if err != nil { + return handleError(apiResp, constants.LIST, err) + } + + response := make([]interface{}, 0, len(accumulatedProcessors)) + for i := range accumulatedProcessors { + modelItem, err := GetStreamProcessorModel(&accumulatedProcessors[i], model) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + } + } + + copyIdentifyingFields(modelItem, model) + response = append(response, modelItem) + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "List Completed", + ResourceModels: response, + } +} From a0573da407e15007e5a73647e91b9ed673f879c9 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Wed, 14 Jan 2026 11:53:41 -0500 Subject: [PATCH 04/10] CLOUDP-368428-Stream-Processor Constants and Aligning with Flex Cluster --- .../cmd/resource/callbacks.go | 18 ++++++++--------- .../stream-processor/cmd/resource/share.go | 20 +++++++++---------- 2 files changed, 19 insertions(+), 19 deletions(-) diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go b/cfn-resources/stream-processor/cmd/resource/callbacks.go index fb4cedc42..07a425e73 100644 --- a/cfn-resources/stream-processor/cmd/resource/callbacks.go +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -147,15 +147,15 @@ func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, curre if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { return *peErr } - return createInProgressEvent("Starting stream processor", currentModel, callbackContext) + return createInProgressEvent(constants.Pending, currentModel, callbackContext) } - return finalizeModel(streamProcessor, currentModel, "Create Completed") + 
return finalizeModel(streamProcessor, currentModel, constants.Complete) case StartedState: - return finalizeModel(streamProcessor, currentModel, "Create Completed") + return finalizeModel(streamProcessor, currentModel, constants.Complete) case InitiatingState, CreatingState: - return createInProgressEvent(fmt.Sprintf("Creating stream processor (current state: %s)", currentState), currentModel, callbackContext) + return createInProgressEvent(constants.Pending, currentModel, callbackContext) case FailedState: return handler.ProgressEvent{ @@ -214,14 +214,14 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { return *peErr } - return createInProgressEvent("Starting stream processor", currentModel, callbackContext) + return createInProgressEvent(constants.Pending, currentModel, callbackContext) } - return finalizeModel(streamProcessorResp, currentModel, "Update Completed") + return finalizeModel(streamProcessorResp, currentModel, constants.Complete) case StartedState: if desiredState == StartedState { - return finalizeModel(streamProcessor, currentModel, "Update Completed") + return finalizeModel(streamProcessor, currentModel, constants.Complete) } _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, @@ -237,7 +237,7 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), } } - return createInProgressEvent("Stopping stream processor", currentModel, callbackContext) + return createInProgressEvent(constants.Pending, currentModel, callbackContext) case FailedState: return handler.ProgressEvent{ @@ -246,6 +246,6 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre } default: - return createInProgressEvent(fmt.Sprintf("Updating stream 
processor (current state: %s)", currentState), currentModel, callbackContext) + return createInProgressEvent(constants.Pending, currentModel, callbackContext) } } diff --git a/cfn-resources/stream-processor/cmd/resource/share.go b/cfn-resources/stream-processor/cmd/resource/share.go index 121a1ef8a..627c0c796 100644 --- a/cfn-resources/stream-processor/cmd/resource/share.go +++ b/cfn-resources/stream-processor/cmd/resource/share.go @@ -28,13 +28,13 @@ import ( "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" ) -func isCallback(req *handler.Request) bool { +func IsCallback(req *handler.Request) bool { _, found := req.CallbackContext["callbackStreamProcessor"] return found } func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { - if isCallback(req) { + if IsCallback(req) { callbackCtx := getCallbackData(*req) if peErr := validateCallbackData(callbackCtx); peErr != nil { return *peErr @@ -107,7 +107,7 @@ func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model return handler.ProgressEvent{ OperationStatus: handler.InProgress, - Message: "Creating stream processor", + Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ @@ -160,13 +160,13 @@ func HandleRead(req *handler.Request, client *util.MongoDBClient, model *Model) return handler.ProgressEvent{ OperationStatus: handler.Success, - Message: "Read Completed", + Message: constants.ReadComplete, ResourceModel: resourceModel, } } func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *Model, model *Model) handler.ProgressEvent { - if isCallback(req) { + if IsCallback(req) { callbackCtx := getCallbackData(*req) if peErr := validateCallbackData(callbackCtx); peErr != nil { return *peErr @@ -249,7 +249,7 @@ func HandleUpdate(req 
*handler.Request, client *util.MongoDBClient, prevModel *M return handler.ProgressEvent{ OperationStatus: handler.InProgress, - Message: "Stopping stream processor", + Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ @@ -295,7 +295,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M return handler.ProgressEvent{ OperationStatus: handler.InProgress, - Message: "Starting stream processor", + Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ @@ -304,7 +304,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M } } - return finalizeModel(streamProcessorResp, model, "Update Completed") + return finalizeModel(streamProcessorResp, model, constants.Complete) } func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { @@ -337,7 +337,7 @@ func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model return handler.ProgressEvent{ OperationStatus: handler.Success, - Message: "Delete Completed", + Message: constants.Complete, } } @@ -374,7 +374,7 @@ func HandleList(req *handler.Request, client *util.MongoDBClient, model *Model) return handler.ProgressEvent{ OperationStatus: handler.Success, - Message: "List Completed", + Message: constants.Complete, ResourceModels: response, } } From e25ee80ff1066a0249a355ac3301f25218e0f562 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Thu, 15 Jan 2026 10:49:45 -0500 Subject: [PATCH 05/10] CLOUDP-368428-Stream-Processor Internal Review --- .../stream-processor/mongodb-atlas-streamprocessor.json | 3 +++ .../stream-processor/test/inputs_1_create.template.json | 2 +- 
.../stream-processor/test/inputs_1_update.template.json | 2 +- .../stream-processor/test/inputs_2_create.template.json | 2 +- .../stream-processor/test/inputs_2_update.template.json | 2 +- .../stream-processor/test/inputs_3_create.template.json | 2 +- .../stream-processor/test/inputs_3_update.template.json | 2 +- .../stream-processor/test/inputs_4_create.template.json | 2 +- .../stream-processor/test/inputs_4_update.template.json | 2 +- .../stream-processor/test/inputs_5_create.template.json | 2 +- .../stream-processor/test/inputs_5_update.template.json | 2 +- .../test/stream-processor.sample-cfn-request.json | 2 +- 12 files changed, 14 insertions(+), 11 deletions(-) diff --git a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json index 6f9778809..b49b9eb86 100644 --- a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json +++ b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json @@ -138,6 +138,9 @@ }, "delete": { "permissions": ["secretsmanager:GetSecretValue"] + }, + "list": { + "permissions": ["secretsmanager:GetSecretValue"] } }, "documentationUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/cfn-resources/stream-processor/README.md", diff --git a/cfn-resources/stream-processor/test/inputs_1_create.template.json b/cfn-resources/stream-processor/test/inputs_1_create.template.json index 82b52ad69..7e0a720ec 100644 --- a/cfn-resources/stream-processor/test/inputs_1_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_1_create.template.json @@ -4,5 +4,5 @@ "WorkspaceName": "", "ProcessorName": "test-processor-1", "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git 
a/cfn-resources/stream-processor/test/inputs_1_update.template.json b/cfn-resources/stream-processor/test/inputs_1_update.template.json index 82b52ad69..7e0a720ec 100644 --- a/cfn-resources/stream-processor/test/inputs_1_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_1_update.template.json @@ -4,5 +4,5 @@ "WorkspaceName": "", "ProcessorName": "test-processor-1", "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git a/cfn-resources/stream-processor/test/inputs_2_create.template.json b/cfn-resources/stream-processor/test/inputs_2_create.template.json index 6d805b84b..3d2ac71a5 100644 --- a/cfn-resources/stream-processor/test/inputs_2_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_2_create.template.json @@ -4,7 +4,7 @@ "WorkspaceName": "", "ProcessorName": "test-processor-2", "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "STARTED", + "DesiredState": "STARTED", "Timeouts": { "Create": "25m" }, diff --git a/cfn-resources/stream-processor/test/inputs_2_update.template.json b/cfn-resources/stream-processor/test/inputs_2_update.template.json index 0a21a02be..2653d8846 100644 --- a/cfn-resources/stream-processor/test/inputs_2_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_2_update.template.json @@ -4,5 +4,5 @@ "WorkspaceName": "", "ProcessorName": "test-processor-2", "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "STOPPED" + "DesiredState": "STOPPED" } diff --git 
a/cfn-resources/stream-processor/test/inputs_3_create.template.json b/cfn-resources/stream-processor/test/inputs_3_create.template.json index 6a1de915b..b9b2acbcf 100644 --- a/cfn-resources/stream-processor/test/inputs_3_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_3_create.template.json @@ -5,7 +5,7 @@ "InstanceName": "", "ProcessorName": "test-processor-3", "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "CREATED", + "DesiredState": "CREATED", "Options": { "Dlq": { "Coll": "dlq-collection", diff --git a/cfn-resources/stream-processor/test/inputs_3_update.template.json b/cfn-resources/stream-processor/test/inputs_3_update.template.json index 0b1c1fba6..4eb1d4397 100644 --- a/cfn-resources/stream-processor/test/inputs_3_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_3_update.template.json @@ -5,7 +5,7 @@ "InstanceName": "", "ProcessorName": "test-processor-3", "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", - "State": "CREATED", + "DesiredState": "CREATED", "Options": { "Dlq": { "Coll": "dlq-collection-updated", diff --git a/cfn-resources/stream-processor/test/inputs_4_create.template.json b/cfn-resources/stream-processor/test/inputs_4_create.template.json index 2ee322dc9..6a8553746 100644 --- a/cfn-resources/stream-processor/test/inputs_4_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_4_create.template.json @@ -5,5 +5,5 @@ "InstanceName": "", "ProcessorName": "test-processor-4-kafka-to-cluster", "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": 
\"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages\", \"timeseries\": {\"timeField\": \"ts\"}}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git a/cfn-resources/stream-processor/test/inputs_4_update.template.json b/cfn-resources/stream-processor/test/inputs_4_update.template.json index afbe861ba..dd9ad3bb8 100644 --- a/cfn-resources/stream-processor/test/inputs_4_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_4_update.template.json @@ -5,5 +5,5 @@ "InstanceName": "", "ProcessorName": "test-processor-4-kafka-to-cluster", "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": \"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages_updated\", \"timeseries\": {\"timeField\": \"ts\"}}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git a/cfn-resources/stream-processor/test/inputs_5_create.template.json b/cfn-resources/stream-processor/test/inputs_5_create.template.json index f0956470c..40f84d146 100644 --- a/cfn-resources/stream-processor/test/inputs_5_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_5_create.template.json @@ -5,5 +5,5 @@ "InstanceName": "", "ProcessorName": "test-processor-5-cluster-to-kafka", "Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git a/cfn-resources/stream-processor/test/inputs_5_update.template.json b/cfn-resources/stream-processor/test/inputs_5_update.template.json index 558e63ead..16591f441 100644 --- a/cfn-resources/stream-processor/test/inputs_5_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_5_update.template.json @@ -5,5 +5,5 @@ "InstanceName": "", "ProcessorName": 
"test-processor-5-cluster-to-kafka", "Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic_updated\"}}]", - "State": "CREATED" + "DesiredState": "CREATED" } diff --git a/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json b/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json index bdf7671df..434caeccc 100644 --- a/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json +++ b/cfn-resources/stream-processor/test/stream-processor.sample-cfn-request.json @@ -5,7 +5,7 @@ "WorkspaceName": "", "ProcessorName": "sample-processor", "Pipeline": "[{\"$match\": {\"status\": \"active\"}}]", - "State": "CREATED" + "DesiredState": "CREATED" }, "previousResourceState": {} } From 47853bacb66f534ce05f67c6b9a99c8201bac337 Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Mon, 19 Jan 2026 17:25:22 +0530 Subject: [PATCH 06/10] refactor(stream-processor): address review comments --- cfn-resources/stream-processor/README.md | 2 +- .../cmd/resource/callbacks.go | 41 +++++++------ .../cmd/resource/{share.go => handlers.go} | 61 +++++-------------- .../stream-processor/cmd/resource/helpers.go | 25 +++----- .../stream-processor/cmd/resource/mappings.go | 17 +----- .../cmd/resource/mappings_test.go | 34 ----------- .../stream-processor/cmd/resource/model.go | 1 - .../stream-processor/cmd/resource/resource.go | 6 +- cfn-resources/stream-processor/docs/README.md | 16 +---- .../mongodb-atlas-streamprocessor.json | 10 +-- cfn-resources/stream-processor/test/README.md | 35 ++++++----- .../test/cfn-test-create-inputs.sh | 11 ---- .../test/cfn-test-delete-inputs.sh | 2 +- .../test/inputs_3_create.template.json | 1 - .../test/inputs_3_update.template.json | 1 - .../test/inputs_4_create.template.json | 1 - .../test/inputs_4_update.template.json | 1 - .../test/inputs_5_create.template.json | 1 
- .../test/inputs_5_update.template.json | 1 - cfn-resources/util/constants/constants.go | 1 + 20 files changed, 76 insertions(+), 192 deletions(-) rename cfn-resources/stream-processor/cmd/resource/{share.go => handlers.go} (85%) diff --git a/cfn-resources/stream-processor/README.md b/cfn-resources/stream-processor/README.md index d0899e0f0..443a57974 100644 --- a/cfn-resources/stream-processor/README.md +++ b/cfn-resources/stream-processor/README.md @@ -138,7 +138,7 @@ For processors with DLQ: ## Notes - **AWS Only**: This CloudFormation resource is designed for AWS deployments. The provider is effectively AWS. -- **WorkspaceName vs InstanceName**: Use `WorkspaceName` (preferred). `InstanceName` is supported for backward compatibility but is deprecated. +- **WorkspaceName**: This field is the same as 'InstanceName' used in other stream resources. - **State Management**: When creating a processor, specify `State: STARTED` to automatically start processing, or `State: CREATED` to create it in a stopped state. - **Long-Running Operations**: Creating and starting stream processors can take several minutes. The resource uses callback-based state management to handle these operations asynchronously. - **Timeout Configuration**: Use `Timeouts.Create` to configure how long to wait for processor creation/startup (default: 20 minutes). 
diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go b/cfn-resources/stream-processor/cmd/resource/callbacks.go index 07a425e73..0dd1ca582 100644 --- a/cfn-resources/stream-processor/cmd/resource/callbacks.go +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -29,14 +29,14 @@ import ( ) type CallbackData struct { - ProjectID string - WorkspaceOrInstanceName string - ProcessorName string - DesiredState string - StartTime string - TimeoutDuration string - NeedsStarting bool - DeleteOnCreateTimeout bool + ProjectID string + WorkspaceName string + ProcessorName string + DesiredState string + StartTime string + TimeoutDuration string + NeedsStarting bool + DeleteOnCreateTimeout bool } func getCallbackData(req handler.Request) *CallbackData { @@ -46,7 +46,7 @@ func getCallbackData(req handler.Request) *CallbackData { ctx.ProjectID = val } if val, ok := req.CallbackContext["workspaceName"].(string); ok { - ctx.WorkspaceOrInstanceName = val + ctx.WorkspaceName = val } if val, ok := req.CallbackContext["processorName"].(string); ok { ctx.ProcessorName = val @@ -71,7 +71,7 @@ func getCallbackData(req handler.Request) *CallbackData { } func validateCallbackData(ctx *CallbackData) *handler.ProgressEvent { - if ctx.ProjectID == "" || ctx.WorkspaceOrInstanceName == "" || ctx.ProcessorName == "" { + if ctx.ProjectID == "" || ctx.WorkspaceName == "" || ctx.ProcessorName == "" { return &handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Missing required values in callback context", @@ -80,11 +80,11 @@ func validateCallbackData(ctx *CallbackData) *handler.ProgressEvent { return nil } -func buildCallbackContext(projectID, workspaceOrInstanceName, processorName string, additionalFields map[string]any) map[string]any { +func buildCallbackContext(projectID, workspaceName, processorName string, additionalFields map[string]any) map[string]any { ctx := map[string]any{ "callbackStreamProcessor": true, "projectID": projectID, - "workspaceName": 
workspaceOrInstanceName, + "workspaceName": workspaceName, "processorName": processorName, } @@ -98,9 +98,10 @@ func cleanupOnCreateTimeout(ctx context.Context, client *util.MongoDBClient, cal return nil } - _, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName).Execute() + _, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName).Execute() if err != nil { _, _ = logger.Warnf("Cleanup delete failed: %v", err) + return err } return nil } @@ -127,14 +128,14 @@ func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, curre } } - streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName) if peErr != nil { return *peErr } currentState := streamProcessor.GetState() - callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName, map[string]any{ "needsStarting": callbackCtx.NeedsStarting, "startTime": callbackCtx.StartTime, "timeoutDuration": callbackCtx.TimeoutDuration, @@ -144,7 +145,7 @@ func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, curre switch currentState { case CreatedState: if needsStarting { - if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName); peErr != nil { return *peErr 
} return createInProgressEvent(constants.Pending, currentModel, callbackContext) @@ -172,7 +173,7 @@ func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, curre } func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, currentModel *Model, callbackCtx *CallbackData) handler.ProgressEvent { - streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + streamProcessor, peErr := getStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName) if peErr != nil { return *peErr } @@ -191,7 +192,7 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre currentState := streamProcessor.GetState() - callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + callbackContext := buildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName, map[string]any{ "desiredState": desiredState, }) @@ -211,7 +212,7 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre } if desiredState == StartedState { - if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + if peErr := startStreamProcessor(ctx, client.AtlasSDK, callbackCtx.ProjectID, callbackCtx.WorkspaceName, callbackCtx.ProcessorName); peErr != nil { return *peErr } return createInProgressEvent(constants.Pending, currentModel, callbackContext) @@ -227,7 +228,7 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, &admin.StopStreamProcessorApiParams{ GroupId: callbackCtx.ProjectID, - TenantName: callbackCtx.WorkspaceOrInstanceName, + TenantName: callbackCtx.WorkspaceName, 
ProcessorName: callbackCtx.ProcessorName, }, ).Execute() diff --git a/cfn-resources/stream-processor/cmd/resource/share.go b/cfn-resources/stream-processor/cmd/resource/handlers.go similarity index 85% rename from cfn-resources/stream-processor/cmd/resource/share.go rename to cfn-resources/stream-processor/cmd/resource/handlers.go index 627c0c796..ff8e93608 100644 --- a/cfn-resources/stream-processor/cmd/resource/share.go +++ b/cfn-resources/stream-processor/cmd/resource/handlers.go @@ -47,13 +47,7 @@ func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model ) } - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - } - } + workspaceName := util.SafeString(model.WorkspaceName) ctx := context.Background() projectID := util.SafeString(model.ProjectId) @@ -83,7 +77,7 @@ func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model } } - _, apiResp, err := client.AtlasSDK.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceOrInstanceName, streamProcessorReq).Execute() + _, apiResp, err := client.AtlasSDK.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceName, streamProcessorReq).Execute() if err != nil { return handleError(apiResp, constants.CREATE, err) } @@ -110,7 +104,7 @@ func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, - CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + CallbackContext: buildCallbackContext(projectID, workspaceName, processorName, map[string]any{ "needsStarting": needsStarting, "startTime": time.Now().Format(time.RFC3339), "timeoutDuration": timeoutStr, @@ -120,21 +114,14 @@ func HandleCreate(req *handler.Request, client *util.MongoDBClient, model *Model } func 
HandleRead(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - } - } - + workspaceName := util.SafeString(model.WorkspaceName) projectID := util.SafeString(model.ProjectId) processorName := util.SafeString(model.ProcessorName) streamProcessor, apiResp, err := client.AtlasSDK.StreamsApi.GetStreamProcessorWithParams(context.Background(), &admin.GetStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, }).Execute() if err != nil { @@ -179,13 +166,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M ) } - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - } - } + workspaceName := util.SafeString(model.WorkspaceName) ctx := context.Background() projectID := util.SafeString(model.ProjectId) @@ -193,7 +174,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M requestParams := &admin.GetStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, } @@ -229,7 +210,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, &admin.StopStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, }, ).Execute() @@ -252,7 +233,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, - 
CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + CallbackContext: buildCallbackContext(projectID, workspaceName, processorName, map[string]any{ "desiredState": desiredState, }), } @@ -275,7 +256,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M _, err := client.AtlasSDK.StreamsApi.StartStreamProcessorWithParams(ctx, &admin.StartStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, }, ).Execute() @@ -298,7 +279,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M Message: constants.Pending, ResourceModel: inProgressModel, CallbackDelaySeconds: defaultCallbackDelaySeconds, - CallbackContext: buildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + CallbackContext: buildCallbackContext(projectID, workspaceName, processorName, map[string]any{ "desiredState": desiredState, }), } @@ -308,19 +289,13 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M } func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - } - } + workspaceName := util.SafeString(model.WorkspaceName) ctx := context.Background() projectID := util.SafeString(model.ProjectId) processorName := util.SafeString(model.ProcessorName) - apiResp, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceOrInstanceName, processorName).Execute() + apiResp, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceName, processorName).Execute() if err != nil { if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { return handler.ProgressEvent{ @@ -342,18 
+317,12 @@ func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model } func HandleList(req *handler.Request, client *util.MongoDBClient, model *Model) handler.ProgressEvent { - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: err.Error(), - } - } + workspaceName := util.SafeString(model.WorkspaceName) ctx := context.Background() projectID := util.SafeString(model.ProjectId) - accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, client.AtlasSDK, projectID, workspaceOrInstanceName) + accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, client.AtlasSDK, projectID, workspaceName) if err != nil { return handleError(apiResp, constants.LIST, err) } diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go index a737112ff..724c3bd1b 100644 --- a/cfn-resources/stream-processor/cmd/resource/helpers.go +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -34,18 +34,7 @@ func copyIdentifyingFields(resourceModel, currentModel *Model) { resourceModel.Profile = currentModel.Profile resourceModel.ProjectId = currentModel.ProjectId resourceModel.ProcessorName = currentModel.ProcessorName - - switch { - case currentModel.WorkspaceName != nil && *currentModel.WorkspaceName != "": - resourceModel.WorkspaceName = currentModel.WorkspaceName - resourceModel.InstanceName = util.Pointer(*currentModel.WorkspaceName) - case currentModel.InstanceName != nil && *currentModel.InstanceName != "": - resourceModel.InstanceName = currentModel.InstanceName - resourceModel.WorkspaceName = util.Pointer(*currentModel.InstanceName) - default: - resourceModel.WorkspaceName = currentModel.WorkspaceName - resourceModel.InstanceName = currentModel.InstanceName - } + resourceModel.WorkspaceName = currentModel.WorkspaceName } func parseTimeout(timeoutStr string) 
time.Duration { @@ -95,14 +84,14 @@ func finalizeModel(streamProcessor *admin.StreamsProcessorWithStats, currentMode } } -func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsProcessorWithStats, *http.Response, error) { +func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceName string) ([]admin.StreamsProcessorWithStats, *http.Response, error) { pageNum := 1 accumulatedProcessors := make([]admin.StreamsProcessorWithStats, 0) for allRecordsRetrieved := false; !allRecordsRetrieved; { processorsResp, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorsWithParams(ctx, &admin.GetStreamProcessorsApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ItemsPerPage: util.Pointer(constants.DefaultListItemsPerPage), PageNum: util.Pointer(pageNum), }).Execute() @@ -122,10 +111,10 @@ func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, p return accumulatedProcessors, nil, nil } -func getStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) (*admin.StreamsProcessorWithStats, *handler.ProgressEvent) { +func getStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceName, processorName string) (*admin.StreamsProcessorWithStats, *handler.ProgressEvent) { requestParams := &admin.GetStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, } @@ -146,11 +135,11 @@ func getStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, proje return streamProcessor, nil } -func startStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) *handler.ProgressEvent { +func startStreamProcessor(ctx context.Context, atlasClient 
*admin.APIClient, projectID, workspaceName, processorName string) *handler.ProgressEvent { _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, &admin.StartStreamProcessorApiParams{ GroupId: projectID, - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: processorName, }, ).Execute() diff --git a/cfn-resources/stream-processor/cmd/resource/mappings.go b/cfn-resources/stream-processor/cmd/resource/mappings.go index ec4853fa0..76f5c3ac2 100644 --- a/cfn-resources/stream-processor/cmd/resource/mappings.go +++ b/cfn-resources/stream-processor/cmd/resource/mappings.go @@ -23,16 +23,6 @@ import ( "github.com/mongodb/mongodbatlas-cloudformation-resources/util" ) -func GetWorkspaceOrInstanceName(model *Model) (string, error) { - if model.WorkspaceName != nil && *model.WorkspaceName != "" { - return *model.WorkspaceName, nil - } - if model.InstanceName != nil && *model.InstanceName != "" { - return *model.InstanceName, nil - } - return "", fmt.Errorf("either WorkspaceName or InstanceName must be provided") -} - func ConvertPipelineToSdk(pipeline string) ([]any, error) { var pipelineSliceOfMaps []any err := json.Unmarshal([]byte(pipeline), &pipelineSliceOfMaps) @@ -96,14 +86,11 @@ func NewStreamProcessorUpdateReq(model *Model) (*admin.UpdateStreamProcessorApiP return nil, err } - workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model) - if err != nil { - return nil, err - } + workspaceName := util.SafeString(model.WorkspaceName) streamProcessorAPIParams := &admin.UpdateStreamProcessorApiParams{ GroupId: util.SafeString(model.ProjectId), - TenantName: workspaceOrInstanceName, + TenantName: workspaceName, ProcessorName: util.SafeString(model.ProcessorName), StreamsModifyStreamProcessor: &admin.StreamsModifyStreamProcessor{ Name: model.ProcessorName, diff --git a/cfn-resources/stream-processor/cmd/resource/mappings_test.go b/cfn-resources/stream-processor/cmd/resource/mappings_test.go index ca4e5e7f4..c5a156cb6 100644 --- 
a/cfn-resources/stream-processor/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-processor/cmd/resource/mappings_test.go @@ -33,40 +33,6 @@ func assertJSONEqual(t *testing.T, expected, actual string) { assert.Equal(t, expectedJSON, actualJSON) } -func TestGetWorkspaceOrInstanceName(t *testing.T) { - testCases := map[string]struct { - model *resource.Model - expectedResult string - expectedError string - }{ - "workspaceName": { - model: &resource.Model{WorkspaceName: util.StringPtr("workspace-1")}, - expectedResult: "workspace-1", - }, - "instanceName": { - model: &resource.Model{InstanceName: util.StringPtr("instance-1")}, - expectedResult: "instance-1", - }, - "neitherSet": { - model: &resource.Model{}, - expectedError: "either WorkspaceName or InstanceName must be provided", - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - result, err := resource.GetWorkspaceOrInstanceName(tc.model) - if tc.expectedError != "" { - require.Error(t, err) - assert.Contains(t, err.Error(), tc.expectedError) - } else { - require.NoError(t, err) - assert.Equal(t, tc.expectedResult, result) - } - }) - } -} - func TestConvertPipelineToSdk(t *testing.T) { testCases := map[string]struct { pipeline string diff --git a/cfn-resources/stream-processor/cmd/resource/model.go b/cfn-resources/stream-processor/cmd/resource/model.go index 282c8e1a8..8d05e02fd 100644 --- a/cfn-resources/stream-processor/cmd/resource/model.go +++ b/cfn-resources/stream-processor/cmd/resource/model.go @@ -6,7 +6,6 @@ package resource type Model struct { Profile *string `json:",omitempty"` ProjectId *string `json:",omitempty"` - InstanceName *string `json:",omitempty"` WorkspaceName *string `json:",omitempty"` ProcessorName *string `json:",omitempty"` Pipeline *string `json:",omitempty"` diff --git a/cfn-resources/stream-processor/cmd/resource/resource.go b/cfn-resources/stream-processor/cmd/resource/resource.go index b8d9d7020..f02018fc8 100644 --- 
a/cfn-resources/stream-processor/cmd/resource/resource.go +++ b/cfn-resources/stream-processor/cmd/resource/resource.go @@ -39,9 +39,9 @@ const ( ) var ( - createRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline} - readUpdateDeleteRequiredFields = []string{constants.ProjectID, constants.ProcessorName} - listRequiredFields = []string{constants.ProjectID} + createRequiredFields = []string{constants.ProjectID, constants.WorkspaceName, constants.ProcessorName, constants.Pipeline} + readUpdateDeleteRequiredFields = []string{constants.ProjectID, constants.WorkspaceName, constants.ProcessorName} + listRequiredFields = []string{constants.ProjectID, constants.WorkspaceName} ) func Create(req handler.Request, prevModel *Model, model *Model) (handler.ProgressEvent, error) { diff --git a/cfn-resources/stream-processor/docs/README.md b/cfn-resources/stream-processor/docs/README.md index 3cae09fda..8eab21b46 100644 --- a/cfn-resources/stream-processor/docs/README.md +++ b/cfn-resources/stream-processor/docs/README.md @@ -14,7 +14,6 @@ To declare this entity in your AWS CloudFormation template, use the following sy "Properties" : { "Profile" : String, "ProjectId" : String, - "InstanceName" : String, "WorkspaceName" : String, "ProcessorName" : String, "Pipeline" : String, @@ -33,7 +32,6 @@ Type: MongoDB::Atlas::StreamProcessor Properties: Profile: String ProjectId: String - InstanceName: String WorkspaceName: String ProcessorName: String Pipeline: String @@ -73,21 +71,11 @@ _Pattern_: ^([a-f0-9]{24})$ _Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) -#### InstanceName - -Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided. 
- -_Required_: No - -_Type_: String - -_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) - #### WorkspaceName -Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided. +Label that identifies the stream processing workspace. -_Required_: No +_Required_: Yes _Type_: String diff --git a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json index b49b9eb86..9999078ab 100644 --- a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json +++ b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json @@ -59,13 +59,9 @@ "minLength": 24, "pattern": "^([a-f0-9]{24})$" }, - "InstanceName": { - "type": "string", - "description": "Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided." - }, "WorkspaceName": { "type": "string", - "description": "Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided." + "description": "Label that identifies the stream processing workspace." 
}, "ProcessorName": { "type": "string", @@ -105,7 +101,7 @@ } }, "additionalProperties": false, - "required": ["ProjectId", "ProcessorName", "Pipeline"], + "required": ["ProjectId", "WorkspaceName", "ProcessorName", "Pipeline"], "readOnlyProperties": [ "/properties/Id", "/properties/Stats", @@ -114,14 +110,12 @@ "writeOnlyProperties": ["/properties/DeleteOnCreateTimeout"], "primaryIdentifier": [ "/properties/ProjectId", - "/properties/InstanceName", "/properties/WorkspaceName", "/properties/ProcessorName", "/properties/Profile" ], "createOnlyProperties": [ "/properties/ProjectId", - "/properties/InstanceName", "/properties/WorkspaceName", "/properties/ProcessorName", "/properties/Profile" diff --git a/cfn-resources/stream-processor/test/README.md b/cfn-resources/stream-processor/test/README.md index c52bf2154..54bc6e0e0 100644 --- a/cfn-resources/stream-processor/test/README.md +++ b/cfn-resources/stream-processor/test/README.md @@ -1,12 +1,15 @@ # MongoDB::Atlas::StreamProcessor ## Impact + The following components use this resource and are potentially impacted by any changes. They should also be validated to ensure the changes do not cause a regression. - - Stream Processor L1 CDK constructor +- Stream Processor L1 CDK constructor ## Prerequisites + ### Resources needed to run the manual QA + All resources are created as part of `cfn-testing-helper.sh`: - Atlas Project @@ -17,8 +20,8 @@ All resources are created as part of `cfn-testing-helper.sh`: **IMPORTANT**: Stream Instance/Workspace creation is a LONG-RUNNING operation that can take 10-30+ minutes. The `cfn-test-create-inputs.sh` script will create the workspace and wait for it to be ready before proceeding. ## Manual QA -Please follow the steps in [TESTING.md](../../../TESTING.md). +Please follow the steps in [TESTING.md](../../../TESTING.md). ### Success criteria when testing the resource @@ -27,6 +30,7 @@ Please follow the steps in [TESTING.md](../../../TESTING.md). 
A Stream Processor should be created in the specified test project for the specified Atlas Stream workspace/instance: **Atlas UI Verification:** + - Navigate to Atlas UI → Your Project → Stream Processing - Select the stream workspace/instance used in the test - Go to the **Processors** tab @@ -39,11 +43,13 @@ A Stream Processor should be created in the specified test project for the speci - Merge target connection, database, and collection are correct **Atlas CLI Verification:** + ```bash atlas streams processors describe \ --instance \ --projectId ``` + - Verify `id` field is present (matches CloudFormation `Id` attribute) - Verify `name` matches `ProcessorName` - Verify `state` matches `State` parameter @@ -52,6 +58,7 @@ atlas streams processors describe \ #### 2. DLQ Configuration Verification (inputs_3) For processors with DLQ configuration: + - In Atlas UI: Verify DLQ settings are displayed in processor details - Via Atlas CLI: Verify `options.dlq` object contains: - `connectionName`: Matches `Options.Dlq.ConnectionName` @@ -61,19 +68,16 @@ For processors with DLQ configuration: #### 3. Backward Compatibility Testing Test both field names work correctly: + - **Test with `WorkspaceName`** (preferred field): - Create processor using `WorkspaceName` parameter - Verify processor is created successfully - - Verify both `WorkspaceName` and `InstanceName` are set in returned model (for primary identifier) -- **Test with `InstanceName`** (deprecated field): - - Create processor using `InstanceName` parameter - - Verify processor is created successfully - - Verify both `WorkspaceName` and `InstanceName` are set in returned model - - Verify `WorkspaceName` is automatically set from `InstanceName` for forward compatibility + - Verify `WorkspaceName` is set in returned model (for primary identifier) #### 4. 
State Transition Testing Test all valid state transitions: + - **Create with `State: CREATED`**: - Verify processor is created in CREATED state - Verify processor does not start processing automatically @@ -106,9 +110,9 @@ Test all valid state transitions: #### 6. Primary Identifier Verification Verify all primary identifier fields are present in returned models: + - `ProjectId`: Always present -- `WorkspaceName`: Always present (set from `InstanceName` if needed) -- `InstanceName`: Always present (set from `WorkspaceName` if needed) +- `WorkspaceName`: Always present - `ProcessorName`: Always present - `Profile`: Always present @@ -117,14 +121,15 @@ This is critical for CloudFormation to properly track the resource. #### 7. General CFN Resource Success Criteria Ensure general [CFN resource success criteria](../../../TESTING.md#success-criteria-when-testing-the-resource) for this resource is met: + - All CRUD operations work correctly - Read-after-Create returns correct values - Update operations preserve primary identifier - Delete operations clean up resources - Error handling is appropriate - ## Important Links + - [API Documentation](https://www.mongodb.com/docs/api/doc/atlas-admin-api-v2/group/endpoint-streams) - [Resource Usage Documentation](https://www.mongodb.com/docs/atlas/atlas-sp/overview/) @@ -135,7 +140,9 @@ The local tests are integrated with the AWS `sam local` and `cfn invoke` tooling ``` sam local start-lambda --skip-pull-image ``` + then in another shell: + ```bash repo_root=$(git rev-parse --show-toplevel) cd ${repo_root}/cfn-resources/stream-processor @@ -152,10 +159,10 @@ The test directory contains the following input files: - `inputs_1_create.template.json` / `inputs_1_update.template.json`: Basic stream processor with WorkspaceName, CREATED state - `inputs_2_create.template.json` / `inputs_2_update.template.json`: Stream processor with STARTED state, timeout configuration, and DeleteOnCreateTimeout -- `inputs_3_create.template.json` / 
`inputs_3_update.template.json`: Stream processor with InstanceName (backward compatibility) and DLQ options +- `inputs_3_create.template.json` / `inputs_3_update.template.json`: Stream processor with DLQ options All input files respect: + - AWS-only behavior (no Azure/GCP-only parameters) -- Required fields: ProjectId, ProcessorName, Pipeline -- Backward compatibility: Supports both WorkspaceName and InstanceName +- Required fields: ProjectId, WorkspaceName, ProcessorName, Pipeline - Schema validation: All fields match the final CFN schema diff --git a/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh index 27796c305..286f77378 100755 --- a/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh +++ b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh @@ -185,7 +185,6 @@ echo -e "Created Kafka Sink Connection \"${kafkaSinkConnectionName}\"\n" # Generate input files # Reuse connectionName from inputs_3 for inputs_1 and inputs_2 sink (saves creating another cluster) -# Also set InstanceName from WorkspaceName for primary identifier (both fields required) jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ @@ -193,7 +192,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ "$(dirname "$0")/inputs_1_create.template.json" >"inputs/inputs_1_create.json" @@ -204,7 +202,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ "$(dirname "$0")/inputs_1_update.template.json" >"inputs/inputs_1_update.json" @@ -215,7 +212,6 @@ jq --arg workspace_name "$workspaceName" 
\ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ "$(dirname "$0")/inputs_2_create.template.json" >"inputs/inputs_2_create.json" @@ -226,7 +222,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ "$(dirname "$0")/inputs_2_update.template.json" >"inputs/inputs_2_update.json" @@ -237,7 +232,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Options.Dlq.ConnectionName?|=$connection_name | .Pipeline?|=gsub("CONNECTION_NAME_PLACEHOLDER"; $connection_name)' \ "$(dirname "$0")/inputs_3_create.template.json" >"inputs/inputs_3_create.json" @@ -249,7 +243,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Options.Dlq.ConnectionName?|=$connection_name | .Pipeline?|=gsub("CONNECTION_NAME_PLACEHOLDER"; $connection_name)' \ "$(dirname "$0")/inputs_3_update.template.json" >"inputs/inputs_3_update.json" @@ -263,7 +256,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("KAFKA_SOURCE_CONNECTION_PLACEHOLDER"; $kafka_source) | .Pipeline?|=gsub("CLUSTER_SINK_CONNECTION_PLACEHOLDER"; $cluster_sink)' \ "$(dirname "$0")/inputs_4_create.template.json" >"inputs/inputs_4_create.json" @@ -276,7 +268,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | 
.Pipeline?|=gsub("KAFKA_SOURCE_CONNECTION_PLACEHOLDER"; $kafka_source) | .Pipeline?|=gsub("CLUSTER_SINK_CONNECTION_PLACEHOLDER"; $cluster_sink)' \ "$(dirname "$0")/inputs_4_update.template.json" >"inputs/inputs_4_update.json" @@ -290,7 +281,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("CLUSTER_SOURCE_CONNECTION_PLACEHOLDER"; $cluster_source) | .Pipeline?|=gsub("KAFKA_SINK_CONNECTION_PLACEHOLDER"; $kafka_sink)' \ "$(dirname "$0")/inputs_5_create.template.json" >"inputs/inputs_5_create.json" @@ -303,7 +293,6 @@ jq --arg workspace_name "$workspaceName" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name - | .InstanceName?|=$workspace_name | .Pipeline?|=gsub("CLUSTER_SOURCE_CONNECTION_PLACEHOLDER"; $cluster_source) | .Pipeline?|=gsub("KAFKA_SINK_CONNECTION_PLACEHOLDER"; $kafka_sink)' \ "$(dirname "$0")/inputs_5_update.template.json" >"inputs/inputs_5_update.json" diff --git a/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh index 65212620f..4e6b15de7 100755 --- a/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh +++ b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh @@ -11,7 +11,7 @@ function usage { } projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) -workspaceName=$(jq -r '.WorkspaceName // .InstanceName' ./inputs/inputs_1_create.json) +workspaceName=$(jq -r '.WorkspaceName' ./inputs/inputs_1_create.json) processorName1=$(jq -r '.ProcessorName' ./inputs/inputs_1_create.json) processorName2=$(jq -r '.ProcessorName' ./inputs/inputs_2_create.json) processorName3=$(jq -r '.ProcessorName' ./inputs/inputs_3_create.json) diff --git a/cfn-resources/stream-processor/test/inputs_3_create.template.json b/cfn-resources/stream-processor/test/inputs_3_create.template.json index b9b2acbcf..4e61dd3a5 
100644 --- a/cfn-resources/stream-processor/test/inputs_3_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_3_create.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-3", "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", "DesiredState": "CREATED", diff --git a/cfn-resources/stream-processor/test/inputs_3_update.template.json b/cfn-resources/stream-processor/test/inputs_3_update.template.json index 4eb1d4397..3d928c19c 100644 --- a/cfn-resources/stream-processor/test/inputs_3_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_3_update.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-3", "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", "DesiredState": "CREATED", diff --git a/cfn-resources/stream-processor/test/inputs_4_create.template.json b/cfn-resources/stream-processor/test/inputs_4_create.template.json index 6a8553746..dc62f70aa 100644 --- a/cfn-resources/stream-processor/test/inputs_4_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_4_create.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-4-kafka-to-cluster", "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": \"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages\", \"timeseries\": {\"timeField\": \"ts\"}}}]", "DesiredState": 
"CREATED" diff --git a/cfn-resources/stream-processor/test/inputs_4_update.template.json b/cfn-resources/stream-processor/test/inputs_4_update.template.json index dd9ad3bb8..9214a44db 100644 --- a/cfn-resources/stream-processor/test/inputs_4_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_4_update.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-4-kafka-to-cluster", "Pipeline": "[{\"$source\": {\"connectionName\": \"KAFKA_SOURCE_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}, {\"$emit\": {\"connectionName\": \"CLUSTER_SINK_CONNECTION_PLACEHOLDER\", \"db\": \"kafka\", \"coll\": \"kafka_messages_updated\", \"timeseries\": {\"timeField\": \"ts\"}}}]", "DesiredState": "CREATED" diff --git a/cfn-resources/stream-processor/test/inputs_5_create.template.json b/cfn-resources/stream-processor/test/inputs_5_create.template.json index 40f84d146..e58c256ac 100644 --- a/cfn-resources/stream-processor/test/inputs_5_create.template.json +++ b/cfn-resources/stream-processor/test/inputs_5_create.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-5-cluster-to-kafka", "Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic\"}}]", "DesiredState": "CREATED" diff --git a/cfn-resources/stream-processor/test/inputs_5_update.template.json b/cfn-resources/stream-processor/test/inputs_5_update.template.json index 16591f441..8abc048e2 100644 --- a/cfn-resources/stream-processor/test/inputs_5_update.template.json +++ b/cfn-resources/stream-processor/test/inputs_5_update.template.json @@ -2,7 +2,6 @@ "Profile": "default", "ProjectId": "", "WorkspaceName": "", - "InstanceName": "", "ProcessorName": "test-processor-5-cluster-to-kafka", 
"Pipeline": "[{\"$source\": {\"connectionName\": \"CLUSTER_SOURCE_CONNECTION_PLACEHOLDER\"}}, {\"$emit\": {\"connectionName\": \"KAFKA_SINK_CONNECTION_PLACEHOLDER\", \"topic\": \"random_topic_updated\"}}]", "DesiredState": "CREATED" diff --git a/cfn-resources/util/constants/constants.go b/cfn-resources/util/constants/constants.go index d9de1bd9d..5e1dc590b 100644 --- a/cfn-resources/util/constants/constants.go +++ b/cfn-resources/util/constants/constants.go @@ -160,4 +160,5 @@ const ( ProcessorName = "ProcessorName" Pipeline = "Pipeline" + WorkspaceName = "WorkspaceName" ) From 3420198407f0f60f8cca90e43c0a272974e52124 Mon Sep 17 00:00:00 2001 From: Rakhul S Prakash Date: Tue, 20 Jan 2026 09:49:42 +0530 Subject: [PATCH 07/10] Add contract testing workflow --- .github/workflows/contract-testing.yaml | 52 +++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/.github/workflows/contract-testing.yaml b/.github/workflows/contract-testing.yaml index 4820758d1..7bc45c065 100644 --- a/.github/workflows/contract-testing.yaml +++ b/.github/workflows/contract-testing.yaml @@ -30,6 +30,7 @@ jobs: search-deployment: ${{ steps.filter.outputs.search-deployment }} stream-connection: ${{ steps.filter.outputs.stream-connection }} stream-instance: ${{ steps.filter.outputs.stream-instance }} + stream-processor: ${{ steps.filter.outputs.stream-processor }} stream-workspace: ${{ steps.filter.outputs.stream-workspace }} steps: - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 @@ -76,6 +77,8 @@ jobs: - 'cfn-resources/stream-connection/**' stream-instance: - 'cfn-resources/stream-instance/**' + stream-processor: + - 'cfn-resources/stream-processor/**' stream-workspace: - 'cfn-resources/stream-workspace/**' access-list-api-key: @@ -855,6 +858,55 @@ jobs: pushd cfn-resources/stream-instance make create-test-resources cat inputs/inputs_1_create.json + make run-contract-testing + make delete-test-resources + stream-processor: + needs: change-detection + if: ${{ 
needs.change-detection.outputs.stream-processor == 'true' }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 + - uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 + with: + go-version-file: 'cfn-resources/go.mod' + - name: setup Atlas CLI + uses: mongodb/atlas-github-action@e3c9e0204659bafbb3b65e1eb1ee745cca0e9f3b + - uses: aws-actions/setup-sam@c2a20b1822cc4a6bc594ff7f1dbb658758e383c3 + with: + use-installer: true + - uses: aws-actions/configure-aws-credentials@61815dcd50bd041e203e49132bacad1fd04d2708 + with: + aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID_TEST_ENV }} + aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY_TEST_ENV }} + aws-region: eu-west-1 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 + with: + python-version: '3.9' + cache: 'pip' # caching pip dependencies + - run: pip install cloudformation-cli cloudformation-cli-go-plugin + - name: Run the Contract test + shell: bash + env: + MONGODB_ATLAS_PUBLIC_API_KEY: ${{ secrets.CLOUD_DEV_PUBLIC_KEY }} + MONGODB_ATLAS_PRIVATE_API_KEY: ${{ secrets.CLOUD_DEV_PRIVATE_KEY }} + MONGODB_ATLAS_ORG_ID: ${{ secrets.CLOUD_DEV_ORG_ID }} + MONGODB_ATLAS_OPS_MANAGER_URL: ${{ vars.MONGODB_ATLAS_BASE_URL }} + MONGODB_ATLAS_PROFILE: cfn-cloud-dev-github-action + run: | + cd cfn-resources/stream-processor + make create-test-resources + + cat inputs/inputs_1_create.json + cat inputs/inputs_1_update.json + cat inputs/inputs_2_create.json + cat inputs/inputs_2_update.json + cat inputs/inputs_3_create.json + cat inputs/inputs_3_update.json + cat inputs/inputs_4_create.json + cat inputs/inputs_4_update.json + cat inputs/inputs_5_create.json + cat inputs/inputs_5_update.json + make run-contract-testing make delete-test-resources stream-workspace: From 45bcdb8e3e2adc50117e9fc091024b5dd31fc4aa Mon Sep 17 00:00:00 2001 From: Rakhul S Prakash Date: Tue, 20 Jan 2026 09:59:55 +0530 Subject: [PATCH 08/10] Use util.http_status helper 
functions --- cfn-resources/stream-processor/cmd/resource/handlers.go | 7 +++---- cfn-resources/stream-processor/cmd/resource/helpers.go | 4 ++-- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/cfn-resources/stream-processor/cmd/resource/handlers.go b/cfn-resources/stream-processor/cmd/resource/handlers.go index ff8e93608..076949896 100644 --- a/cfn-resources/stream-processor/cmd/resource/handlers.go +++ b/cfn-resources/stream-processor/cmd/resource/handlers.go @@ -17,7 +17,6 @@ package resource import ( "context" "fmt" - "net/http" "time" "go.mongodb.org/atlas-sdk/v20250312012/admin" @@ -125,7 +124,7 @@ func HandleRead(req *handler.Request, client *util.MongoDBClient, model *Model) ProcessorName: processorName, }).Execute() if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + if util.StatusNotFound(apiResp) { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Resource not found", @@ -180,7 +179,7 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M currentStreamProcessor, apiResp, err := client.AtlasSDK.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + if util.StatusNotFound(apiResp) { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Resource not found", @@ -297,7 +296,7 @@ func HandleDelete(req *handler.Request, client *util.MongoDBClient, model *Model apiResp, err := client.AtlasSDK.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceName, processorName).Execute() if err != nil { - if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + if util.StatusNotFound(apiResp) { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Resource not found", diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go index 724c3bd1b..b98185a04 100644 --- 
a/cfn-resources/stream-processor/cmd/resource/helpers.go +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -120,7 +120,7 @@ func getStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, proje streamProcessor, resp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() if err != nil { - if resp != nil && resp.StatusCode == http.StatusNotFound { + if util.StatusNotFound(resp) { return nil, &handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Stream processor not found", @@ -188,7 +188,7 @@ func validateUpdateStateTransition(currentState, desiredState string) (errMsg st func handleError(response *http.Response, method constants.CfnFunctions, err error) handler.ProgressEvent { errMsg := fmt.Sprintf("%s error:%s", method, err.Error()) - if response != nil && response.StatusCode == http.StatusConflict { + if util.StatusConflict(response) { return handler.ProgressEvent{ OperationStatus: handler.Failed, Message: errMsg, From 301fa66511d03b1999376b256ff4cc1bbc37b3db Mon Sep 17 00:00:00 2001 From: Rakhul S Prakash Date: Tue, 20 Jan 2026 10:36:23 +0530 Subject: [PATCH 09/10] Update workflow --- .../test/contract-testing/cfn-test-delete.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh index 71286ddfb..a1e063f38 100755 --- a/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh +++ b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh @@ -5,11 +5,4 @@ set -o errexit set -o nounset set -o pipefail -projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) - -# delete project -if atlas projects delete "$projectId" --force; then - echo "$projectId project deletion OK" -else - (echo "Failed cleaning project: $projectId" && exit 1) -fi +./test/cfn-test-delete-inputs.sh From 
bca5ac58c3868671eb01799a6145e12ea7563f67 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Tue, 20 Jan 2026 11:51:25 -0500 Subject: [PATCH 10/10] CLOUDP-368428-Stream-Processor Addressing review comments --- .github/workflows/contract-testing.yaml | 13 ++-------- cfn-resources/stream-processor/Makefile | 2 +- .../cmd/resource/callbacks.go | 24 ++++++++++++------- .../stream-processor/cmd/resource/handlers.go | 14 ++--------- .../stream-processor/cmd/resource/helpers.go | 2 ++ 5 files changed, 23 insertions(+), 32 deletions(-) diff --git a/.github/workflows/contract-testing.yaml b/.github/workflows/contract-testing.yaml index 7bc45c065..81eedd449 100644 --- a/.github/workflows/contract-testing.yaml +++ b/.github/workflows/contract-testing.yaml @@ -896,16 +896,7 @@ jobs: cd cfn-resources/stream-processor make create-test-resources - cat inputs/inputs_1_create.json - cat inputs/inputs_1_update.json - cat inputs/inputs_2_create.json - cat inputs/inputs_2_update.json - cat inputs/inputs_3_create.json - cat inputs/inputs_3_update.json - cat inputs/inputs_4_create.json - cat inputs/inputs_4_update.json - cat inputs/inputs_5_create.json - cat inputs/inputs_5_update.json + cat inputs/* make run-contract-testing make delete-test-resources @@ -949,4 +940,4 @@ jobs: cat inputs/inputs_1_update.json make run-contract-testing - make delete-test-resources \ No newline at end of file + make delete-test-resources diff --git a/cfn-resources/stream-processor/Makefile b/cfn-resources/stream-processor/Makefile index b4c27f329..ed0cb0d06 100644 --- a/cfn-resources/stream-processor/Makefile +++ b/cfn-resources/stream-processor/Makefile @@ -38,4 +38,4 @@ run-contract-testing: @echo "==> Run contract testing" make build sam local start-lambda & - cfn test --function-name TestEntrypoint --verbose \ No newline at end of file + cfn test --function-name TestEntrypoint --verbose diff --git a/cfn-resources/stream-processor/cmd/resource/callbacks.go 
b/cfn-resources/stream-processor/cmd/resource/callbacks.go index 0dd1ca582..07a2e52e4 100644 --- a/cfn-resources/stream-processor/cmd/resource/callbacks.go +++ b/cfn-resources/stream-processor/cmd/resource/callbacks.go @@ -118,7 +118,7 @@ func handleCreateCallback(ctx context.Context, client *util.MongoDBClient, curre } cleanupMsg := "Timeout reached when waiting for stream processor creation" if callbackCtx.DeleteOnCreateTimeout { - cleanupMsg += ". Resource has been deleted because delete_on_create_timeout is true. If you suspect a transient error, wait before retrying to allow resource deletion to finish." + cleanupMsg += ". Deletion of resource has been triggered because delete_on_create_timeout is true. If you suspect a transient error, wait before retrying to allow resource deletion to finish." } else { cleanupMsg += ". Cleanup was not performed because delete_on_create_timeout is false." } @@ -181,13 +181,12 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre desiredState := callbackCtx.DesiredState if desiredState == "" { desiredState = streamProcessor.GetState() - if desiredState == "" { - if currentModel != nil && currentModel.DesiredState != nil && *currentModel.DesiredState != "" { - desiredState = *currentModel.DesiredState - } else { - desiredState = CreatedState - } - } + } + if desiredState == "" && currentModel != nil && currentModel.DesiredState != nil && *currentModel.DesiredState != "" { + desiredState = *currentModel.DesiredState + } + if desiredState == "" { + desiredState = CreatedState } currentState := streamProcessor.GetState() @@ -225,6 +224,15 @@ func handleUpdateCallback(ctx context.Context, client *util.MongoDBClient, curre return finalizeModel(streamProcessor, currentModel, constants.Complete) } + // Only StoppedState is a valid transition from StartedState + // (CreatedState transitions are not allowed per validateUpdateStateTransition) + if desiredState != StoppedState { + return 
handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Unexpected desired state %s when current state is %s. Only %s is allowed.", desiredState, StartedState, StoppedState), + } + } + _, err := client.AtlasSDK.StreamsApi.StopStreamProcessorWithParams(ctx, &admin.StopStreamProcessorApiParams{ GroupId: callbackCtx.ProjectID, diff --git a/cfn-resources/stream-processor/cmd/resource/handlers.go b/cfn-resources/stream-processor/cmd/resource/handlers.go index 076949896..9f6f23170 100644 --- a/cfn-resources/stream-processor/cmd/resource/handlers.go +++ b/cfn-resources/stream-processor/cmd/resource/handlers.go @@ -252,18 +252,8 @@ func HandleUpdate(req *handler.Request, client *util.MongoDBClient, prevModel *M } if desiredState == StartedState { - _, err := client.AtlasSDK.StreamsApi.StartStreamProcessorWithParams(ctx, - &admin.StartStreamProcessorApiParams{ - GroupId: projectID, - TenantName: workspaceName, - ProcessorName: processorName, - }, - ).Execute() - if err != nil { - return handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), - } + if peErr := startStreamProcessor(ctx, client.AtlasSDK, projectID, workspaceName, processorName); peErr != nil { + return *peErr } inProgressModel := &Model{} diff --git a/cfn-resources/stream-processor/cmd/resource/helpers.go b/cfn-resources/stream-processor/cmd/resource/helpers.go index b98185a04..473fc55ca 100644 --- a/cfn-resources/stream-processor/cmd/resource/helpers.go +++ b/cfn-resources/stream-processor/cmd/resource/helpers.go @@ -49,6 +49,8 @@ func parseTimeout(timeoutStr string) time.Duration { return duration } +// isTimeoutExceeded checks if the elapsed time since startTimeStr exceeds the timeoutDurationStr. +// If this function needs to be used by other resources in the future, it should be moved to the util package. 
func isTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool { if startTimeStr == "" || timeoutDurationStr == "" { return false