diff --git a/cfn-resources/stream-processor/.rpdk-config b/cfn-resources/stream-processor/.rpdk-config
new file mode 100644
index 000000000..40acec48b
--- /dev/null
+++ b/cfn-resources/stream-processor/.rpdk-config
@@ -0,0 +1,27 @@
+{
+    "artifact_type": "RESOURCE",
+    "typeName": "MongoDB::Atlas::StreamProcessor",
+    "language": "go",
+    "runtime": "provided.al2",
+    "entrypoint": "bootstrap",
+    "testEntrypoint": "bootstrap",
+    "settings": {
+        "version": false,
+        "subparser_name": null,
+        "verbose": 0,
+        "force": false,
+        "type_name": "MongoDB::Atlas::StreamProcessor",
+        "artifact_type": "r",
+        "endpoint_url": null,
+        "region": null,
+        "target_schemas": [],
+        "profile": null,
+        "import_path": "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor",
+        "protocolVersion": "2.0.0"
+    },
+    "canarySettings": {
+        "contract_test_file_names": [
+            "inputs_1.json"
+        ]
+    }
+}
diff --git a/cfn-resources/stream-processor/Makefile b/cfn-resources/stream-processor/Makefile
new file mode 100644
index 000000000..606a597fa
--- /dev/null
+++ b/cfn-resources/stream-processor/Makefile
@@ -0,0 +1,37 @@
+.PHONY: build debug test clean
+tags=logging callback metrics scheduler
+cgo=0
+goos=linux
+goarch=amd64
+CFNREP_GIT_SHA?=$(shell git rev-parse HEAD)
+ldXflags=-s -w -X github.com/mongodb/mongodbatlas-cloudformation-resources/util.defaultLogLevel=info -X github.com/mongodb/mongodbatlas-cloudformation-resources/version.Version=${CFNREP_GIT_SHA}
+ldXflagsD=-X github.com/mongodb/mongodbatlas-cloudformation-resources/util.defaultLogLevel=debug -X github.com/mongodb/mongodbatlas-cloudformation-resources/version.Version=${CFNREP_GIT_SHA}
+
+build:
+	cfn generate
+	env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflags)" -tags="$(tags)" -o bin/bootstrap cmd/main.go
+
+debug:
+	cfn generate
+	env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflagsD)" -tags="$(tags)" -o bin/bootstrap cmd/main.go
+
+test:
+	cfn generate
+	env GOOS=$(goos) CGO_ENABLED=$(cgo) GOARCH=$(goarch) go build -ldflags="$(ldXflags)" -tags="$(tags)" -o bin/bootstrap cmd/main.go
+
+clean:
+	rm -rf bin
+
+create-test-resources:
+	@echo "==> Creating test files and resources for contract testing"
+	./test/contract-testing/cfn-test-create.sh
+
+delete-test-resources:
+	@echo "==> Delete test resources used for contract testing"
+	./test/contract-testing/cfn-test-delete.sh
+
+run-contract-testing:
+	@echo "==> Run contract testing"
+	make build
+	sam local start-lambda &
+	cfn test --function-name TestEntrypoint --verbose
\ No newline at end of file
diff --git a/cfn-resources/stream-processor/README.md b/cfn-resources/stream-processor/README.md
new file mode 100644
index 000000000..285439edb
--- /dev/null
+++ b/cfn-resources/stream-processor/README.md
@@ -0,0 +1,139 @@
+# MongoDB::Atlas::StreamProcessor
+
+## Description
+
+Resource for creating and managing [Stream Processors for an Atlas Stream Instance](https://www.mongodb.com/docs/api/doc/atlas-admin-api-v2/operation/operation-createstreamprocessor).
+
+## Requirements
+
+Set up an AWS profile to securely give CloudFormation access to your Atlas credentials.
+For instructions on setting up a profile, [see here](/README.md#mongodb-atlas-api-keys-credential-management).
+
+## Attributes and Parameters
+
+See the [resource docs](docs/README.md). Also refer to the [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials.
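+
+A minimal resource snippet is sketched below. It is illustrative only: the property names follow the resource model (see [docs/README.md](docs/README.md)) and the pipeline stages are placeholders, so verify both against the generated docs before use.
+
+```json
+{
+  "Type": "MongoDB::Atlas::StreamProcessor",
+  "Properties": {
+    "Profile": "default",
+    "ProjectId": "<PROJECT_ID>",
+    "WorkspaceName": "<WORKSPACE_NAME>",
+    "ProcessorName": "solar-processor",
+    "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"<SINK_CONNECTION_NAME>\", \"db\": \"test\", \"coll\": \"output\"}}}]",
+    "DesiredState": "CREATED"
+  }
+}
+```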
+
+## CloudFormation Examples
+
+See the example [CFN templates](/examples/atlas-streams/stream-processor/):
+- [Basic Stream Processor](/examples/atlas-streams/stream-processor/stream-processor.json)
+- [Stream Processor with DLQ](/examples/atlas-streams/stream-processor/stream-processor-with-dlq.json)
+
+## Prerequisites
+
+Before creating a stream processor, you must have:
+- An existing Atlas Project
+- An existing Stream Instance/Workspace (created via the `MongoDB::Atlas::StreamInstance` resource)
+- At least one Stream Connection configured (created via the `MongoDB::Atlas::StreamConnection` resource)
+  - A source connection (e.g., sample data source, cluster connection, or Kafka connection)
+  - A sink connection (must be a cluster connection for merge operations)
+
+## Deployment
+
+### Deploy Basic Stream Processor
+
+```bash
+aws cloudformation deploy \
+  --template-file examples/atlas-streams/stream-processor/stream-processor.json \
+  --stack-name stream-processor-stack \
+  --parameter-overrides \
+    ProjectId=<PROJECT_ID> \
+    WorkspaceName=<WORKSPACE_NAME> \
+    ProcessorName=my-processor \
+    SourceConnectionName=sample_stream_solar \
+    SinkConnectionName=<SINK_CONNECTION_NAME> \
+    SinkDatabase=test \
+    SinkCollection=output \
+    State=CREATED \
+  --capabilities CAPABILITY_IAM \
+  --region us-east-1
+```
+
+### Deploy Stream Processor with DLQ
+
+```bash
+aws cloudformation deploy \
+  --template-file examples/atlas-streams/stream-processor/stream-processor-with-dlq.json \
+  --stack-name stream-processor-dlq-stack \
+  --parameter-overrides \
+    ProjectId=<PROJECT_ID> \
+    WorkspaceName=<WORKSPACE_NAME> \
+    ProcessorName=my-processor-dlq \
+    SourceConnectionName=sample_stream_solar \
+    SinkConnectionName=<SINK_CONNECTION_NAME> \
+    SinkDatabase=test \
+    SinkCollection=output \
+    DlqConnectionName=<DLQ_CONNECTION_NAME> \
+    DlqDatabase=dlq \
+    DlqCollection=dlq-messages \
+    State=CREATED \
+  --capabilities CAPABILITY_IAM \
+  --region us-east-1
+```
+
+## Verification
+
+After deployment, verify that the stream processor was created successfully using both the Atlas CLI and the Atlas UI.
+
+### Atlas CLI Verification
+
+```bash
+# List all stream processors for a workspace
+atlas streams processors list --instance <WORKSPACE_NAME> --projectId <PROJECT_ID>
+
+# Describe a specific stream processor
+atlas streams processors describe <PROCESSOR_NAME> \
+  --instance <WORKSPACE_NAME> \
+  --projectId <PROJECT_ID>
+```
+
+### Expected CLI Output
+
+The `atlas streams processors describe` command should return:
+- `id`: Unique identifier of the processor (matches the `Id` attribute in CloudFormation)
+- `name`: Processor name (matches the `ProcessorName` parameter)
+- `state`: Current state (CREATED, STARTED, STOPPED, or FAILED)
+- `pipeline`: Array of pipeline stages matching your `Pipeline` configuration
+- `options`: DLQ configuration if provided (should match your `Options.Dlq` settings)
+- `stats`: Processing statistics (available when the processor is STARTED)
+
+### Verify Pipeline Configuration
+
+The pipeline should match your CloudFormation template:
+- Source connection name should match the `SourceConnectionName` parameter
+- Merge target connection should match the `SinkConnectionName` parameter
+- Database and collection should match the `SinkDatabase` and `SinkCollection` parameters
+
+### Verify DLQ Configuration (if applicable)
+
+For processors with DLQ:
+- `options.dlq.connectionName` should match the `DlqConnectionName` parameter
+- `options.dlq.db` should match the `DlqDatabase` parameter
+- `options.dlq.coll` should match the `DlqCollection` parameter
+
+### Atlas UI Verification
+
+1. Navigate to your Atlas project in the [Atlas UI](https://cloud.mongodb.com)
+2. Go to the **Stream Processing** section
+3. Select your stream workspace/instance
+4. Verify the processor appears in the **Processors** tab with:
+   - **Name**: Matches the `ProcessorName` from your CloudFormation template
+   - **State**: Matches the `State` parameter (CREATED, STARTED, or STOPPED)
+   - **Pipeline**: Click on the processor to view pipeline stages and verify:
+     - Source connection matches your `SourceConnectionName` parameter
+     - Merge target connection matches your `SinkConnectionName` parameter
+     - Target database and collection match your `SinkDatabase` and `SinkCollection` parameters
+5. For processors with DLQ:
+   - Verify DLQ configuration is displayed in the processor details
+   - Check that DLQ connection, database, and collection match your parameters
+6. If the processor is in the STARTED state:
+   - Verify processing statistics are available
+   - Check that messages are being processed (stats show input/output message counts)
+
+## Notes
+
+- **AWS Only**: This CloudFormation resource is designed for AWS deployments only.
+- **WorkspaceName vs InstanceName**: Use `WorkspaceName` (preferred). `InstanceName` is supported for backward compatibility but is deprecated.
+- **State Management**: When creating a processor, specify `State: STARTED` to automatically start processing, or `State: CREATED` to create it in a stopped state.
+- **Long-Running Operations**: Creating and starting stream processors can take several minutes. The resource uses callback-based state management to handle these operations asynchronously.
+- **Timeout Configuration**: Use `Timeouts.Create` to configure how long to wait for processor creation/startup (default: 20 minutes).
diff --git a/cfn-resources/stream-processor/cmd/main.go b/cfn-resources/stream-processor/cmd/main.go
new file mode 100644
index 000000000..e34cceb2e
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/main.go
@@ -0,0 +1,85 @@
+// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT.
+package main
+
+import (
+	"errors"
+	"fmt"
+	"log"
+
+	"github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn"
+	"github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource"
+)
+
+// Handler is a container for the CRUDL actions exported by resources
+type Handler struct{}
+
+// Create wraps the related Create function exposed by the resource code
+func (r *Handler) Create(req handler.Request) handler.ProgressEvent {
+	return wrap(req, resource.Create)
+}
+
+// Read wraps the related Read function exposed by the resource code
+func (r *Handler) Read(req handler.Request) handler.ProgressEvent {
+	return wrap(req, resource.Read)
+}
+
+// Update wraps the related Update function exposed by the resource code
+func (r *Handler) Update(req handler.Request) handler.ProgressEvent {
+	return wrap(req, resource.Update)
+}
+
+// Delete wraps the related Delete function exposed by the resource code
+func (r *Handler) Delete(req handler.Request) handler.ProgressEvent {
+	return wrap(req, resource.Delete)
+}
+
+// List wraps the related List function exposed by the resource code
+func (r *Handler) List(req handler.Request) handler.ProgressEvent {
+	return wrap(req, resource.List)
+}
+
+// main is the entry point of the application.
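+// It hands the Handler to cfn.Start, which serves the Lambda runtime and
+// routes each CRUDL invocation through wrap, converting unmarshaling errors
+// and panics into failed ProgressEvents.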
+func main() {
+	cfn.Start(&Handler{})
+}
+
+type handlerFunc func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error)
+
+func wrap(req handler.Request, f handlerFunc) (response handler.ProgressEvent) {
+	defer func() {
+		// Catch any panics and return a failed ProgressEvent
+		if r := recover(); r != nil {
+			err, ok := r.(error)
+			if !ok {
+				err = errors.New(fmt.Sprint(r))
+			}
+
+			log.Printf("Trapped error in handler: %v", err)
+
+			response = handler.NewFailedEvent(err)
+		}
+	}()
+
+	// Populate the previous model
+	prevModel := &resource.Model{}
+	if err := req.UnmarshalPrevious(prevModel); err != nil {
+		log.Printf("Error unmarshaling prev model: %v", err)
+		return handler.NewFailedEvent(err)
+	}
+
+	// Populate the current model
+	currentModel := &resource.Model{}
+	if err := req.Unmarshal(currentModel); err != nil {
+		log.Printf("Error unmarshaling model: %v", err)
+		return handler.NewFailedEvent(err)
+	}
+
+	response, err := f(req, prevModel, currentModel)
+	if err != nil {
+		log.Printf("Error returned from handler function: %v", err)
+		return handler.NewFailedEvent(err)
+	}
+
+	return response
+}
diff --git a/cfn-resources/stream-processor/cmd/resource/config.go b/cfn-resources/stream-processor/cmd/resource/config.go
new file mode 100644
index 000000000..4d9eb7831
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/resource/config.go
@@ -0,0 +1,19 @@
+// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT.
+// Updates to this type are made by editing the schema file and executing the 'generate' command.
+package resource
+
+import "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler"
+
+// TypeConfiguration is autogenerated from the json schema
+type TypeConfiguration struct {
+}
+
+// Configuration returns a resource's configuration.
+func Configuration(req handler.Request) (*TypeConfiguration, error) {
+	// Populate the type configuration
+	typeConfig := &TypeConfiguration{}
+	if err := req.UnmarshalTypeConfig(typeConfig); err != nil {
+		return typeConfig, err
+	}
+	return typeConfig, nil
+}
diff --git a/cfn-resources/stream-processor/cmd/resource/mappings.go b/cfn-resources/stream-processor/cmd/resource/mappings.go
new file mode 100644
index 000000000..dc69a02e5
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/resource/mappings.go
@@ -0,0 +1,185 @@
+// Copyright 2024 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
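+
+// Package resource implements the MongoDB::Atlas::StreamProcessor resource.
+// The helpers in this file translate between the CloudFormation model and the
+// Atlas Admin SDK types (pipelines and stats travel as JSON strings in the
+// model and as []any / map[string]any in the SDK).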
+package resource
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"go.mongodb.org/atlas-sdk/v20250312010/admin"
+
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util"
+)
+
+func GetWorkspaceOrInstanceName(model *Model) (string, error) {
+	if model.WorkspaceName != nil && *model.WorkspaceName != "" {
+		return *model.WorkspaceName, nil
+	}
+	if model.InstanceName != nil && *model.InstanceName != "" {
+		return *model.InstanceName, nil
+	}
+	return "", fmt.Errorf("either WorkspaceName or InstanceName must be provided")
+}
+
+func ConvertPipelineToSdk(pipeline string) ([]any, error) {
+	var pipelineSliceOfMaps []any
+	err := json.Unmarshal([]byte(pipeline), &pipelineSliceOfMaps)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal pipeline: %w", err)
+	}
+	return pipelineSliceOfMaps, nil
+}
+
+func ConvertPipelineToString(pipeline []any) (string, error) {
+	pipelineJSON, err := json.Marshal(pipeline)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal pipeline: %w", err)
+	}
+	return string(pipelineJSON), nil
+}
+
+func ConvertStatsToString(stats any) (string, error) {
+	if stats == nil {
+		return "", nil
+	}
+	statsJSON, err := json.Marshal(stats)
+	if err != nil {
+		return "", fmt.Errorf("failed to marshal stats: %w", err)
+	}
+	return string(statsJSON), nil
+}
+
+func NewStreamProcessorReq(model *Model) (*admin.StreamsProcessor, error) {
+	pipeline, err := ConvertPipelineToSdk(util.SafeString(model.Pipeline))
+	if err != nil {
+		return nil, err
+	}
+
+	streamProcessor := &admin.StreamsProcessor{
+		Name:     model.ProcessorName,
+		Pipeline: &pipeline,
+	}
+
+	if model.Options != nil && model.Options.Dlq != nil {
+		dlq := model.Options.Dlq
+		if dlq.Coll != nil && *dlq.Coll != "" &&
+			dlq.ConnectionName != nil && *dlq.ConnectionName != "" &&
+			dlq.Db != nil && *dlq.Db != "" {
+			streamProcessor.Options = &admin.StreamsOptions{
+				Dlq: &admin.StreamsDLQ{
+					Coll:           dlq.Coll,
+					ConnectionName: dlq.ConnectionName,
+					Db:             dlq.Db,
+				},
+			}
+		}
+	}
+
+	return streamProcessor, nil
+}
+
+func NewStreamProcessorUpdateReq(model *Model) (*admin.UpdateStreamProcessorApiParams, error) {
+	pipeline, err := ConvertPipelineToSdk(util.SafeString(model.Pipeline))
+	if err != nil {
+		return nil, err
+	}
+
+	workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(model)
+	if err != nil {
+		return nil, err
+	}
+
+	streamProcessorAPIParams := &admin.UpdateStreamProcessorApiParams{
+		GroupId:       util.SafeString(model.ProjectId),
+		TenantName:    workspaceOrInstanceName,
+		ProcessorName: util.SafeString(model.ProcessorName),
+		StreamsModifyStreamProcessor: &admin.StreamsModifyStreamProcessor{
+			Name:     model.ProcessorName,
+			Pipeline: &pipeline,
+		},
+	}
+
+	if model.Options != nil && model.Options.Dlq != nil {
+		dlq := model.Options.Dlq
+		if dlq.Coll != nil && *dlq.Coll != "" &&
+			dlq.ConnectionName != nil && *dlq.ConnectionName != "" &&
+			dlq.Db != nil && *dlq.Db != "" {
+			streamProcessorAPIParams.StreamsModifyStreamProcessor.Options = &admin.StreamsModifyStreamProcessorOptions{
+				Dlq: &admin.StreamsDLQ{
+					Coll:           dlq.Coll,
+					ConnectionName: dlq.ConnectionName,
+					Db:             dlq.Db,
+				},
+			}
+		}
+	}
+
+	return streamProcessorAPIParams, nil
+}
+
+func GetStreamProcessorModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model) (*Model, error) {
+	model := new(Model)
+
+	if currentModel != nil {
+		*model = *currentModel
+		model.DeleteOnCreateTimeout = nil
+	}
+
+	model.ProcessorName = util.Pointer(streamProcessor.Name)
+	model.Id = util.Pointer(streamProcessor.Id)
+	model.State = util.Pointer(streamProcessor.State)
+
+	if currentModel != nil && currentModel.Pipeline != nil {
+		model.Pipeline = currentModel.Pipeline
+	} else if streamProcessor.Pipeline != nil {
+		pipelineStr, err := ConvertPipelineToString(streamProcessor.GetPipeline())
+		if err != nil {
+			return nil, err
+		}
+		model.Pipeline = &pipelineStr
+	}
+
+	if streamProcessor.Stats != nil {
+		statsStr, err := ConvertStatsToString(streamProcessor.GetStats())
+		if err != nil {
+			return nil, err
+		}
+		model.Stats = &statsStr
+	}
+
+	if streamProcessor.Options != nil && streamProcessor.Options.Dlq != nil {
+		apiDlq := streamProcessor.Options.Dlq
+		if apiDlq.Coll != nil && *apiDlq.Coll != "" &&
+			apiDlq.ConnectionName != nil && *apiDlq.ConnectionName != "" &&
+			apiDlq.Db != nil && *apiDlq.Db != "" {
+			model.Options = &StreamsOptions{
+				Dlq: &StreamsDLQ{
+					Coll:           apiDlq.Coll,
+					ConnectionName: apiDlq.ConnectionName,
+					Db:             apiDlq.Db,
+				},
+			}
+		}
+	} else if currentModel != nil && currentModel.Options != nil && currentModel.Options.Dlq != nil {
+		currentDlq := currentModel.Options.Dlq
+		if currentDlq.Coll != nil && *currentDlq.Coll != "" &&
+			currentDlq.ConnectionName != nil && *currentDlq.ConnectionName != "" &&
+			currentDlq.Db != nil && *currentDlq.Db != "" {
+			model.Options = currentModel.Options
+		}
+	}
+
+	return model, nil
+}
diff --git a/cfn-resources/stream-processor/cmd/resource/mappings_test.go b/cfn-resources/stream-processor/cmd/resource/mappings_test.go
new file mode 100644
index 000000000..93d87a6b2
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/resource/mappings_test.go
@@ -0,0 +1,971 @@
+// Copyright 2024 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
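+
+// The tests below are table-driven and exercise the mapping helpers,
+// including the JSON round-trips for pipelines and stats.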
+package resource_test
+
+import (
+	"encoding/json"
+	"testing"
+
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+	admin20250312010 "go.mongodb.org/atlas-sdk/v20250312010/admin"
+)
+
+func TestGetWorkspaceOrInstanceName(t *testing.T) {
+	testCases := map[string]struct {
+		model          *resource.Model
+		expectedResult string
+		expectedError  string
+	}{
+		"workspaceNameOnly": {
+			model: &resource.Model{
+				WorkspaceName: util.StringPtr("workspace-1"),
+				InstanceName:  nil,
+			},
+			expectedResult: "workspace-1",
+			expectedError:  "",
+		},
+		"instanceNameOnly": {
+			model: &resource.Model{
+				WorkspaceName: nil,
+				InstanceName:  util.StringPtr("instance-1"),
+			},
+			expectedResult: "instance-1",
+			expectedError:  "",
+		},
+		"bothSet": {
+			model: &resource.Model{
+				WorkspaceName: util.StringPtr("workspace-1"),
+				InstanceName:  util.StringPtr("instance-1"),
+			},
+			expectedResult: "workspace-1", // WorkspaceName takes precedence
+			expectedError:  "",
+		},
+		"neitherSet": {
+			model: &resource.Model{
+				WorkspaceName: nil,
+				InstanceName:  nil,
+			},
+			expectedResult: "",
+			expectedError:  "either WorkspaceName or InstanceName must be provided",
+		},
+		"workspaceNameEmptyString": {
+			model: &resource.Model{
+				WorkspaceName: util.StringPtr(""),
+				InstanceName:  util.StringPtr("instance-1"),
+			},
+			expectedResult: "instance-1",
+			expectedError:  "",
+		},
+		"instanceNameEmptyString": {
+			model: &resource.Model{
+				WorkspaceName: util.StringPtr("workspace-1"),
+				InstanceName:  util.StringPtr(""),
+			},
+			expectedResult: "workspace-1",
+			expectedError:  "",
+		},
+		"bothEmptyStrings": {
+			model: &resource.Model{
+				WorkspaceName: util.StringPtr(""),
+				InstanceName:  util.StringPtr(""),
+			},
+			expectedResult: "",
+			expectedError:  "either WorkspaceName or InstanceName must be provided",
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.GetWorkspaceOrInstanceName(tc.model)
+			if tc.expectedError != "" {
+				require.Error(t, err)
+				assert.Contains(t, err.Error(), tc.expectedError)
+				assert.Empty(t, result)
+			} else {
+				require.NoError(t, err)
+				assert.Equal(t, tc.expectedResult, result)
+			}
+		})
+	}
+}
+
+func TestConvertPipelineToSdk(t *testing.T) {
+	testCases := map[string]struct {
+		validateFunc  func(t *testing.T, result []any)
+		pipeline      string
+		expectedError bool
+	}{
+		"validSimplePipeline": {
+			pipeline:      `[{"$match": {"status": "active"}}]`,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result []any) {
+				t.Helper()
+				require.Len(t, result, 1)
+				stage, ok := result[0].(map[string]any)
+				require.True(t, ok)
+				assert.Equal(t, "active", stage["$match"].(map[string]any)["status"])
+			},
+		},
+		"validComplexPipeline": {
+			pipeline:      `[{"$match": {"status": "active"}}, {"$group": {"_id": "$category", "count": {"$sum": 1}}}]`,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result []any) {
+				t.Helper()
+				require.Len(t, result, 2)
+			},
+		},
+		"validEmptyPipeline": {
+			pipeline:      `[]`,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result []any) {
+				t.Helper()
+				assert.Empty(t, result)
+			},
+		},
+		"invalidJSON": {
+			pipeline:      `[{"$match": {"status": "active"}`,
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"notAnArray": {
+			pipeline:      `{"$match": {"status": "active"}}`,
+			expectedError: true, // JSON unmarshal fails when trying to unmarshal object into slice
+			validateFunc:  nil,
+		},
+		"emptyString": {
+			pipeline:      ``,
+			expectedError: true,
+			validateFunc:  nil,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.ConvertPipelineToSdk(tc.pipeline)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Nil(t, result)
+			} else {
+				require.NoError(t, err)
+				if tc.validateFunc != nil {
+					tc.validateFunc(t, result)
+				}
+			}
+		})
+	}
+}
+
+func TestConvertPipelineToString(t *testing.T) {
+	testCases := map[string]struct {
+		expectedJSON  string
+		pipeline      []any
+		expectedError bool
+	}{
+		"validPipeline": {
+			pipeline: []any{
+				map[string]any{"$match": map[string]any{"status": "active"}},
+			},
+			expectedJSON:  `[{"$match":{"status":"active"}}]`,
+			expectedError: false,
+		},
+		"validComplexPipeline": {
+			pipeline: []any{
+				map[string]any{"$match": map[string]any{"status": "active"}},
+				map[string]any{"$group": map[string]any{"_id": "$category"}},
+			},
+			expectedJSON:  `[{"$match":{"status":"active"}},{"$group":{"_id":"$category"}}]`,
+			expectedError: false,
+		},
+		"emptyPipeline": {
+			pipeline:      []any{},
+			expectedJSON:  `[]`,
+			expectedError: false,
+		},
+		"nilPipeline": {
+			pipeline:      nil,
+			expectedJSON:  `null`,
+			expectedError: false,
+		},
+		"pipelineWithNestedObjects": {
+			pipeline: []any{
+				map[string]any{
+					"$match": map[string]any{
+						"status": "active",
+						"tags":   []any{"important", "urgent"},
+					},
+				},
+			},
+			expectedJSON:  `[{"$match":{"status":"active","tags":["important","urgent"]}}]`,
+			expectedError: false,
+		},
+		"pipelineWithNumbers": {
+			pipeline: []any{
+				map[string]any{
+					"$limit": 10,
+					"$skip":  5,
+				},
+			},
+			expectedJSON:  `[{"$limit":10,"$skip":5}]`,
+			expectedError: false,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.ConvertPipelineToString(tc.pipeline)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Empty(t, result)
+			} else {
+				require.NoError(t, err)
+				// Parse both JSONs and compare to handle formatting differences
+				var resultJSON, expectedJSON any
+				require.NoError(t, json.Unmarshal([]byte(result), &resultJSON))
+				require.NoError(t, json.Unmarshal([]byte(tc.expectedJSON), &expectedJSON))
+				assert.Equal(t, expectedJSON, resultJSON)
+			}
+		})
+	}
+}
+
+func TestConvertStatsToString(t *testing.T) {
+	testCases := map[string]struct {
+		stats         any
+		expectedJSON  string
+		expectedError bool
+	}{
+		"nilStats": {
+			stats:         nil,
+			expectedJSON:  "",
+			expectedError: false,
+		},
+		"validStats": {
+			stats: map[string]any{
+				"bytesProcessed":   1000,
+				"recordsProcessed": 100,
+			},
+			expectedJSON:  `{"bytesProcessed":1000,"recordsProcessed":100}`,
+			expectedError: false,
+		},
+		"emptyMap": {
+			stats:         map[string]any{},
+			expectedJSON:  `{}`,
+			expectedError: false,
+		},
+		"nestedStats": {
+			stats: map[string]any{
+				"input": map[string]any{
+					"bytes": 1000,
+				},
+				"output": map[string]any{
+					"records": 100,
+				},
+			},
+			expectedJSON:  `{"input":{"bytes":1000},"output":{"records":100}}`,
+			expectedError: false,
+		},
+		"statsWithArray": {
+			stats: map[string]any{
+				"errors": []any{"error1", "error2"},
+				"count":  5,
+			},
+			expectedJSON:  `{"count":5,"errors":["error1","error2"]}`,
+			expectedError: false,
+		},
+		"statsWithNumbers": {
+			stats: map[string]any{
+				"floatValue": 3.14,
+				"intValue":   42,
+			},
+			expectedJSON:  `{"floatValue":3.14,"intValue":42}`,
+			expectedError: false,
+		},
+		"statsWithBoolean": {
+			stats: map[string]any{
+				"enabled": true,
+				"active":  false,
+			},
+			expectedJSON:  `{"active":false,"enabled":true}`,
+			expectedError: false,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.ConvertStatsToString(tc.stats)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Empty(t, result)
+			} else {
+				require.NoError(t, err)
+				if tc.stats == nil {
+					assert.Empty(t, result)
+				} else {
+					// Parse both JSONs and compare to handle formatting differences
+					var resultJSON, expectedJSON any
+					require.NoError(t, json.Unmarshal([]byte(result), &resultJSON))
+					require.NoError(t, json.Unmarshal([]byte(tc.expectedJSON), &expectedJSON))
+					assert.Equal(t, expectedJSON, resultJSON)
+				}
+			}
+		})
+	}
+}
+
+func TestNewStreamProcessorReq(t *testing.T) {
+	testCases := map[string]struct {
+		model         *resource.Model
+		validateFunc  func(t *testing.T, result *admin20250312010.StreamsProcessor)
+		expectedError bool
+	}{
+		"minimalRequest": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.StreamsProcessor) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "test-processor", result.GetName())
+				assert.NotNil(t, result.Pipeline)
+				pipeline := result.GetPipeline()
+				require.Len(t, pipeline, 1)
+			},
+		},
+		"withOptions": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options: &resource.StreamsOptions{
+					Dlq: &resource.StreamsDLQ{
+						Coll:           util.StringPtr("dlq-collection"),
+						ConnectionName: util.StringPtr("dlq-connection"),
+						Db:             util.StringPtr("dlq-db"),
+					},
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.StreamsProcessor) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "test-processor", result.GetName())
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				assert.Equal(t, "dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+				assert.Equal(t, "dlq-connection", util.SafeString(result.Options.Dlq.ConnectionName))
+				assert.Equal(t, "dlq-db", util.SafeString(result.Options.Dlq.Db))
+			},
+		},
+		"invalidPipeline": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`invalid json`),
+			},
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"nilPipeline": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      nil,
+			},
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"emptyPipeline": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(``),
+			},
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"withOptionsButNilDlq": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options: &resource.StreamsOptions{
+					Dlq: nil,
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.StreamsProcessor) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Nil(t, result.Options)
+			},
+		},
+		"withNilOptions": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options:       nil,
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.StreamsProcessor) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Nil(t, result.Options)
+			},
+		},
+		"complexPipeline": {
+			model: &resource.Model{
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}, {"$group": {"_id": "$category", "count": {"$sum": 1}}}, {"$sort": {"count": -1}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.StreamsProcessor) {
+				t.Helper()
+				require.NotNil(t, result)
+				pipeline := result.GetPipeline()
+				require.Len(t, pipeline, 3)
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.NewStreamProcessorReq(tc.model)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Nil(t, result)
+			} else {
+				require.NoError(t, err)
+				if tc.validateFunc != nil {
+					tc.validateFunc(t, result)
+				}
+			}
+		})
+	}
+}
+
+func TestNewStreamProcessorUpdateReq(t *testing.T) {
+	testCases := map[string]struct {
+		model         *resource.Model
+		validateFunc  func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams)
+		expectedError bool
+	}{
+		"minimalRequest": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "507f1f77bcf86cd799439011", result.GroupId)
+				assert.Equal(t, "workspace-1", result.TenantName)
+				assert.Equal(t, "test-processor", result.ProcessorName)
+				require.NotNil(t, result.StreamsModifyStreamProcessor)
+				assert.Equal(t, "test-processor", result.StreamsModifyStreamProcessor.GetName())
+				assert.NotNil(t, result.StreamsModifyStreamProcessor.Pipeline)
+			},
+		},
+		"withInstanceName": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				InstanceName:  util.StringPtr("instance-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "instance-1", result.TenantName)
+			},
+		},
+		"withOptions": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options: &resource.StreamsOptions{
+					Dlq: &resource.StreamsDLQ{
+						Coll:           util.StringPtr("dlq-collection"),
+						ConnectionName: util.StringPtr("dlq-connection"),
+						Db:             util.StringPtr("dlq-db"),
+					},
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				require.NotNil(t, result.StreamsModifyStreamProcessor)
+				require.NotNil(t, result.StreamsModifyStreamProcessor.Options)
+				require.NotNil(t, result.StreamsModifyStreamProcessor.Options.Dlq)
+				assert.Equal(t, "dlq-collection", result.StreamsModifyStreamProcessor.Options.Dlq.GetColl())
+				assert.Equal(t, "dlq-connection", result.StreamsModifyStreamProcessor.Options.Dlq.GetConnectionName())
+				assert.Equal(t, "dlq-db", result.StreamsModifyStreamProcessor.Options.Dlq.GetDb())
+			},
+		},
+		"invalidPipeline": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`invalid json`),
+			},
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"bothWorkspaceAndInstanceName": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				InstanceName:  util.StringPtr("instance-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				// WorkspaceName takes precedence over InstanceName
+				assert.Equal(t, "workspace-1", result.TenantName)
+			},
+		},
+		"neitherWorkspaceNorInstanceName": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: true,
+			validateFunc:  nil,
+		},
+		"withNilOptions": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options:       nil,
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				require.NotNil(t, result.StreamsModifyStreamProcessor)
+				assert.Nil(t, result.StreamsModifyStreamProcessor.Options)
+			},
+		},
+		"withOptionsButNilDlq": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+				Options: &resource.StreamsOptions{
+					Dlq: nil,
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				require.NotNil(t, result.StreamsModifyStreamProcessor)
+				assert.Nil(t, result.StreamsModifyStreamProcessor.Options)
+			},
+		},
+		"nilProcessorName": {
+			model: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: nil,
+				WorkspaceName: util.StringPtr("workspace-1"),
+				Pipeline:      util.StringPtr(`[{"$match": {"status": "active"}}]`),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *admin20250312010.UpdateStreamProcessorApiParams) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Empty(t, result.ProcessorName)
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.NewStreamProcessorUpdateReq(tc.model)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Nil(t, result)
+			} else {
+				require.NoError(t, err)
+				if tc.validateFunc != nil {
+					tc.validateFunc(t, result)
+				}
+			}
+		})
+	}
+}
+
+func TestGetStreamProcessorModel(t *testing.T) {
+	testCases := map[string]struct {
+		streamProcessor *admin20250312010.StreamsProcessorWithStats
+		currentModel    *resource.Model
+		validateFunc    func(t *testing.T, result *resource.Model)
+		expectedError   bool
+	}{
+		"minimalConversion": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "test-processor", util.SafeString(result.ProcessorName))
+				assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(result.Id))
+				assert.Equal(t, "CREATED", util.SafeString(result.State))
+			},
+		},
+		"withPipeline": {
+			streamProcessor: func() *admin20250312010.StreamsProcessorWithStats {
+				pipeline := []any{
+					map[string]any{"$match": map[string]any{"status": "active"}},
+				}
+				return &admin20250312010.StreamsProcessorWithStats{
+					Name:     "test-processor",
+					Id:       "507f1f77bcf86cd799439011",
+					State:    "CREATED",
+					Pipeline: pipeline,
+				}
+			}(),
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.NotNil(t, result.Pipeline)
+				// Verify pipeline is valid JSON
+				var pipelineJSON any
+				err := json.Unmarshal([]byte(*result.Pipeline), &pipelineJSON)
+				require.NoError(t, err)
+			},
+		},
+		"withStats": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+				Stats: map[string]any{
+					"bytesProcessed": 1000,
+				},
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.NotNil(t, result.Stats)
+				// Verify stats is valid JSON
+				var statsJSON any
+				err := json.Unmarshal([]byte(*result.Stats), &statsJSON)
+				require.NoError(t, err)
+			},
+		},
+		"withOptions": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+				Options: &admin20250312010.StreamsOptions{
+					Dlq: &admin20250312010.StreamsDLQ{
+						Coll:           admin20250312010.PtrString("dlq-collection"),
+						ConnectionName: admin20250312010.PtrString("dlq-connection"),
+						Db:             admin20250312010.PtrString("dlq-db"),
+					},
+				},
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				assert.Equal(t, "dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+				assert.Equal(t, "dlq-connection", util.SafeString(result.Options.Dlq.ConnectionName))
+				assert.Equal(t, "dlq-db", util.SafeString(result.Options.Dlq.Db))
+			},
+		},
+		"preserveCurrentModelOptions": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+				// No options in response
+			},
+			currentModel: &resource.Model{
+				Options: &resource.StreamsOptions{
+					Dlq: &resource.StreamsDLQ{
+						Coll:           util.StringPtr("existing-dlq-collection"),
+						ConnectionName: util.StringPtr("existing-dlq-connection"),
+						Db:             util.StringPtr("existing-dlq-db"),
+					},
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				// Should preserve current model's options
+				assert.Equal(t, "existing-dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+				assert.Equal(t, "existing-dlq-connection", util.SafeString(result.Options.Dlq.ConnectionName))
+				assert.Equal(t, "existing-dlq-db", util.SafeString(result.Options.Dlq.Db))
+			},
+		},
+		"withCurrentModel": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "STARTED",
+			},
+			currentModel: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				// Should use currentModel as base
+				assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(result.ProjectId))
+				assert.Equal(t, "test-processor", util.SafeString(result.ProcessorName))
+				assert.Equal(t, "STARTED", util.SafeString(result.State))
+			},
+		},
+		"withAllFields": {
+			streamProcessor: func() *admin20250312010.StreamsProcessorWithStats {
+				pipeline := []any{
+					map[string]any{"$match": map[string]any{"status": "active"}},
+					map[string]any{"$group": map[string]any{"_id": "$category"}},
+				}
+				return &admin20250312010.StreamsProcessorWithStats{
+					Name:     "test-processor",
+					Id:       "507f1f77bcf86cd799439011",
+					State:    "STARTED",
+					Pipeline: pipeline,
+					Stats: map[string]any{
+						"bytesProcessed":   5000,
+						"recordsProcessed": 500,
+					},
+					Options: &admin20250312010.StreamsOptions{
+						Dlq: &admin20250312010.StreamsDLQ{
+							Coll:           admin20250312010.PtrString("dlq-collection"),
+							ConnectionName: admin20250312010.PtrString("dlq-connection"),
+							Db:             admin20250312010.PtrString("dlq-db"),
+						},
+					},
+				}
+			}(),
+			currentModel: &resource.Model{
+				ProjectId:     util.StringPtr("507f1f77bcf86cd799439011"),
+				ProcessorName: util.StringPtr("test-processor"),
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Equal(t, "test-processor", util.SafeString(result.ProcessorName))
+				assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(result.Id))
+				assert.Equal(t, "STARTED", util.SafeString(result.State))
+				assert.NotNil(t, result.Pipeline)
+				assert.NotNil(t, result.Stats)
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				assert.Equal(t, "dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+				assert.Equal(t, "dlq-connection", util.SafeString(result.Options.Dlq.ConnectionName))
+				assert.Equal(t, "dlq-db", util.SafeString(result.Options.Dlq.Db))
+			},
+		},
+		"withOptionsButNoDlq": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+				Options: &admin20250312010.StreamsOptions{
+					// Options exists but Dlq is nil
+				},
+			},
+			currentModel: &resource.Model{
+				Options: &resource.StreamsOptions{
+					Dlq: &resource.StreamsDLQ{
+						Coll:           util.StringPtr("existing-dlq-collection"),
+						ConnectionName: util.StringPtr("existing-dlq-connection"),
+						Db:             util.StringPtr("existing-dlq-db"),
+					},
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				// Should preserve current model's options when response has no Dlq
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				assert.Equal(t, "existing-dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+			},
+		},
+		"withNilPipeline": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:     "test-processor",
+				Id:       "507f1f77bcf86cd799439011",
+				State:    "CREATED",
+				Pipeline: nil,
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Nil(t, result.Pipeline)
+			},
+		},
+		"withNilStats": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:  "test-processor",
+				Id:    "507f1f77bcf86cd799439011",
+				State: "CREATED",
+				Stats: nil,
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.Nil(t, result.Stats)
+			},
+		},
+		"withNilOptions": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:    "test-processor",
+				Id:      "507f1f77bcf86cd799439011",
+				State:   "CREATED",
+				Options: nil,
+			},
+			currentModel: &resource.Model{
+				Options: &resource.StreamsOptions{
+					Dlq: &resource.StreamsDLQ{
+						Coll:           util.StringPtr("existing-dlq-collection"),
+						ConnectionName: util.StringPtr("existing-dlq-connection"),
+						Db:             util.StringPtr("existing-dlq-db"),
+					},
+				},
+			},
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				// Should preserve current model's options when response has nil options
+				require.NotNil(t, result.Options)
+				require.NotNil(t, result.Options.Dlq)
+				assert.Equal(t, "existing-dlq-collection", util.SafeString(result.Options.Dlq.Coll))
+			},
+		},
+		"withEmptyPipeline": {
+			streamProcessor: &admin20250312010.StreamsProcessorWithStats{
+				Name:     "test-processor",
+				Id:       "507f1f77bcf86cd799439011",
+				State:    "CREATED",
+				Pipeline: []any{},
+			},
+			currentModel:  nil,
+			expectedError: false,
+			validateFunc: func(t *testing.T, result *resource.Model) {
+				t.Helper()
+				require.NotNil(t, result)
+				assert.NotNil(t, result.Pipeline)
+				assert.Equal(t, "[]", *result.Pipeline)
+			},
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			result, err := resource.GetStreamProcessorModel(tc.streamProcessor, tc.currentModel)
+			if tc.expectedError {
+				require.Error(t, err)
+				assert.Nil(t, result)
+			} else {
+				require.NoError(t, err)
+				if tc.validateFunc != nil {
+					tc.validateFunc(t, result)
+				}
+			}
+		})
+	}
+}
+
+// TestPipelineRoundTrip tests that converting pipeline to SDK and back preserves data
+func TestPipelineRoundTrip(t *testing.T) {
+	originalPipeline := `[{"$match": {"status": "active"}}, {"$group": {"_id": "$category", "count": {"$sum": 1}}}]`
+
+	sdkPipeline, err := resource.ConvertPipelineToSdk(originalPipeline)
+	require.NoError(t, err)
+
+	convertedBack, err := resource.ConvertPipelineToString(sdkPipeline)
+	require.NoError(t, err)
+
+	// Parse both and compare
+	var original, converted any
+	require.NoError(t, json.Unmarshal([]byte(originalPipeline), &original))
+	require.NoError(t, json.Unmarshal([]byte(convertedBack), &converted))
+	assert.Equal(t, original, converted)
+}
+
+// TestStatsRoundTrip tests that converting stats to string and parsing back preserves data
+func TestStatsRoundTrip(t *testing.T) {
+	originalStats := map[string]any{
+		"bytesProcessed":   1000,
+		"recordsProcessed": 100,
+		"nested": map[string]any{
+			"value": 42,
+		},
+	}
+
+	statsString, err := resource.ConvertStatsToString(originalStats)
+	require.NoError(t, err)
+
+	// Parse back and compare
+	var parsedStats any
+	require.NoError(t, json.Unmarshal([]byte(statsString), &parsedStats))
+
+	// JSON unmarshaling converts numbers to float64, so we need to compare values
+	parsedMap, ok := parsedStats.(map[string]any)
+	require.True(t, ok)
+
+	// Compare values accounting for int->float64 conversion
+	assert.InDelta(t, float64(1000), parsedMap["bytesProcessed"], 0.01)
+	assert.InDelta(t, float64(100), parsedMap["recordsProcessed"], 0.01)
+	nested, ok := parsedMap["nested"].(map[string]any)
+	require.True(t, ok)
+	assert.InDelta(t, float64(42), nested["value"], 0.01)
+}
diff --git a/cfn-resources/stream-processor/cmd/resource/model.go b/cfn-resources/stream-processor/cmd/resource/model.go
new file mode 100644
index 000000000..282c8e1a8
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/resource/model.go
@@ -0,0 +1,37 @@
+// Code generated by 'cfn generate', changes will be undone by the next invocation. DO NOT EDIT.
+// Updates to this type are made by editing the schema file and executing the 'generate' command.
+package resource
+
+// Model is autogenerated from the json schema
+type Model struct {
+	Profile               *string         `json:",omitempty"`
+	ProjectId             *string         `json:",omitempty"`
+	InstanceName          *string         `json:",omitempty"`
+	WorkspaceName         *string         `json:",omitempty"`
+	ProcessorName         *string         `json:",omitempty"`
+	Pipeline              *string         `json:",omitempty"`
+	DesiredState          *string         `json:",omitempty"`
+	State                 *string         `json:",omitempty"`
+	Options               *StreamsOptions `json:",omitempty"`
+	Id                    *string         `json:",omitempty"`
+	Stats                 *string         `json:",omitempty"`
+	Timeouts              *Timeouts       `json:",omitempty"`
+	DeleteOnCreateTimeout *bool           `json:",omitempty"`
+}
+
+// StreamsOptions is autogenerated from the json schema
+type StreamsOptions struct {
+	Dlq *StreamsDLQ `json:",omitempty"`
+}
+
+// StreamsDLQ is autogenerated from the json schema
+type StreamsDLQ struct {
+	Coll           *string `json:",omitempty"`
+	ConnectionName *string `json:",omitempty"`
+	Db             *string `json:",omitempty"`
+}
+
+// Timeouts is autogenerated from the json schema
+type Timeouts struct {
+	Create *string `json:",omitempty"`
+}
diff --git a/cfn-resources/stream-processor/cmd/resource/resource.go b/cfn-resources/stream-processor/cmd/resource/resource.go
new file mode 100644
index 000000000..5dd846478
--- /dev/null
+++ b/cfn-resources/stream-processor/cmd/resource/resource.go
@@ -0,0 +1,851 @@
+// Copyright 2024 MongoDB Inc
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//	http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
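+
+// This file implements the CRUDL handlers. Create is asynchronous: the first
+// invocation issues the API call and returns IN_PROGRESS with a callback
+// context; HandleCreateCallback then polls the processor state, starts the
+// processor when the desired state is STARTED, and optionally deletes it if
+// the create timeout is exceeded.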
+package resource
+
+import (
+	"context"
+	"fmt"
+	"maps"
+	"net/http"
+	"time"
+
+	"go.mongodb.org/atlas-sdk/v20250312010/admin"
+
+	"github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler"
+
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util/logger"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util/progressevent"
+	"github.com/mongodb/mongodbatlas-cloudformation-resources/util/validator"
+)
+
+const (
+	InitiatingState = "INIT"
+	CreatingState   = "CREATING"
+	CreatedState    = "CREATED"
+	StartedState    = "STARTED"
+	StoppedState    = "STOPPED"
+	DroppedState    = "DROPPED"
+	FailedState     = "FAILED"
+)
+
+const (
+	DefaultCallbackDelaySeconds = 3
+	DefaultCreateTimeout        = 20 * time.Minute
+)
+
+func Setup() {
+	util.SetupLogger("mongodb-atlas-stream-processor")
+}
+
+var CreateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline}
+var ReadRequiredFields = []string{constants.ProjectID, constants.ProcessorName}
+var UpdateRequiredFields = []string{constants.ProjectID, constants.ProcessorName, constants.Pipeline}
+var DeleteRequiredFields = []string{constants.ProjectID, constants.ProcessorName}
+var ListRequiredFields = []string{constants.ProjectID}
+
+var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) {
+	Setup()
+	util.SetDefaultProfileIfNotDefined(&currentModel.Profile)
+
+	if errEvent := validator.ValidateModel(requiredFields, currentModel); errEvent != nil {
+		return nil, errEvent
+	}
+
+	client, peErr := util.NewAtlasClient(&req, currentModel.Profile)
+	if peErr != nil {
+		return nil, peErr
+	}
+	return client.AtlasSDK, nil
+}
+
+type CallbackData struct {
+	ProjectID               string
+	WorkspaceOrInstanceName string
+	ProcessorName           string
+	DesiredState            string
+	StartTime               string
+	TimeoutDuration         string
+	NeedsStarting           bool
+	DeleteOnCreateTimeout   bool
+}
+
+func IsCallback(req *handler.Request) bool {
+	_, found := req.CallbackContext["callbackStreamProcessor"]
+	return found
+}
+
+func GetCallbackData(req handler.Request) *CallbackData {
+	ctx := &CallbackData{}
+
+	if val, ok := req.CallbackContext["projectID"].(string); ok {
+		ctx.ProjectID = val
+	}
+	if val, ok := req.CallbackContext["workspaceName"].(string); ok {
+		ctx.WorkspaceOrInstanceName = val
+	}
+	if val, ok := req.CallbackContext["processorName"].(string); ok {
+		ctx.ProcessorName = val
+	}
+	if val, ok := req.CallbackContext["needsStarting"].(bool); ok {
+		ctx.NeedsStarting = val
+	}
+	if val, ok := req.CallbackContext["desiredState"].(string); ok {
+		ctx.DesiredState = val
+	}
+	if val, ok := req.CallbackContext["startTime"].(string); ok {
+		ctx.StartTime = val
+	}
+	if val, ok := req.CallbackContext["timeoutDuration"].(string); ok {
+		ctx.TimeoutDuration = val
+	}
+	if val, ok := req.CallbackContext["deleteOnCreateTimeout"].(bool); ok {
+		ctx.DeleteOnCreateTimeout = val
+	}
+
+	return ctx
+}
+
+func ValidateCallbackData(ctx *CallbackData) *handler.ProgressEvent {
+	if ctx.ProjectID == "" || ctx.WorkspaceOrInstanceName == "" || ctx.ProcessorName == "" {
+		return &handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         "Missing required values in callback context",
+		}
+	}
+	return nil
+}
+
+func CopyIdentifyingFields(resourceModel, currentModel *Model) {
+	resourceModel.Profile = currentModel.Profile
+	resourceModel.ProjectId = currentModel.ProjectId
+	resourceModel.ProcessorName = currentModel.ProcessorName
+
+	switch {
+	case currentModel.WorkspaceName != nil && *currentModel.WorkspaceName != "":
+		resourceModel.WorkspaceName = currentModel.WorkspaceName
+		resourceModel.InstanceName = util.Pointer(*currentModel.WorkspaceName)
+	case currentModel.InstanceName != nil && *currentModel.InstanceName != "":
+		resourceModel.InstanceName = currentModel.InstanceName
+		resourceModel.WorkspaceName = util.Pointer(*currentModel.InstanceName)
+	default:
+		resourceModel.WorkspaceName = currentModel.WorkspaceName
+		resourceModel.InstanceName = currentModel.InstanceName
+	}
+}
+
+func BuildCallbackContext(projectID, workspaceOrInstanceName, processorName string, additionalFields map[string]any) map[string]any {
+	ctx := map[string]any{
+		"callbackStreamProcessor": true,
+		"projectID":               projectID,
+		"workspaceName":           workspaceOrInstanceName,
+		"processorName":           processorName,
+	}
+
+	maps.Copy(ctx, additionalFields)
+
+	return ctx
+}
+
+func ParseTimeout(timeoutStr string) time.Duration {
+	if timeoutStr == "" {
+		return DefaultCreateTimeout
+	}
+	duration, err := time.ParseDuration(timeoutStr)
+	if err != nil {
+		_, _ = logger.Warnf("Invalid timeout format '%s', using default: %v", timeoutStr, err)
+		return DefaultCreateTimeout
+	}
+	return duration
+}
+
+func IsTimeoutExceeded(startTimeStr, timeoutDurationStr string) bool {
+	if startTimeStr == "" || timeoutDurationStr == "" {
+		return false
+	}
+
+	startTime, err := time.Parse(time.RFC3339, startTimeStr)
+	if err != nil {
+		_, _ = logger.Warnf("Invalid start time format '%s': %v", startTimeStr, err)
+		return false
+	}
+
+	timeoutDuration := ParseTimeout(timeoutDurationStr)
+	elapsed := time.Since(startTime)
+
+	return elapsed >= timeoutDuration
+}
+
+func CleanupOnCreateTimeout(ctx context.Context, atlasClient *admin.APIClient, callbackCtx *CallbackData) error {
+	if !callbackCtx.DeleteOnCreateTimeout {
+		return nil
+	}
+
+	_, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName).Execute()
+	if err != nil {
+		_, _ = logger.Warnf("Cleanup delete failed: %v", err)
+	}
+	return nil
+}
+
+func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {
+	atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, CreateRequiredFields)
+	if peErr != nil {
+		return *peErr, nil
+	}
+
+	if IsCallback(&req) {
+		callbackCtx := GetCallbackData(req)
+		if peErr := ValidateCallbackData(callbackCtx); peErr != nil {
+			return *peErr, nil
+		}
+		return HandleCreateCallback(
+			context.Background(),
+			atlasClient,
+			currentModel,
+			callbackCtx,
+		)
+	}
+
+	workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel)
+	if err != nil {
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         err.Error(),
+		}, nil
+	}
+
+	ctx := context.Background()
+	projectID := util.SafeString(currentModel.ProjectId)
+	processorName := util.SafeString(currentModel.ProcessorName)
+
+	var needsStarting bool
+	if currentModel.DesiredState != nil {
+		state := *currentModel.DesiredState
+		switch state {
+		case StartedState:
+			needsStarting = true
+		case CreatedState:
+			needsStarting = false
+		default:
+			return handler.ProgressEvent{
+				OperationStatus: handler.Failed,
+				Message:         "When creating a stream processor, the only valid states are CREATED and STARTED",
+			}, nil
+		}
+	}
+
+	streamProcessorReq, err := NewStreamProcessorReq(currentModel)
+	if err != nil {
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         fmt.Sprintf("Error creating stream processor request: %s", err.Error()),
+		}, nil
+	}
+
+	_, apiResp, err := atlasClient.StreamsApi.CreateStreamProcessor(ctx, projectID, workspaceOrInstanceName, streamProcessorReq).Execute()
+	if err != nil {
+		return HandleError(apiResp, constants.CREATE, err)
+	}
+
+	timeoutStr := ""
+	if currentModel.Timeouts != nil && currentModel.Timeouts.Create != nil {
+		timeoutStr = *currentModel.Timeouts.Create
+	}
+
+	deleteOnCreateTimeout := true
+	if currentModel.DeleteOnCreateTimeout != nil {
+		deleteOnCreateTimeout = *currentModel.DeleteOnCreateTimeout
+	}
+
+	inProgressModel := &Model{}
+	if currentModel != nil {
+		*inProgressModel = *currentModel
+		inProgressModel.DeleteOnCreateTimeout = nil
+	}
+	CopyIdentifyingFields(inProgressModel, currentModel)
+
+	return handler.ProgressEvent{
+		OperationStatus:      handler.InProgress,
+		Message:              "Creating stream processor",
+		ResourceModel:        inProgressModel,
+		CallbackDelaySeconds: DefaultCallbackDelaySeconds,
+		CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{
+			"needsStarting":         needsStarting,
+			"startTime":             time.Now().Format(time.RFC3339),
+			"timeoutDuration":       timeoutStr,
+			"deleteOnCreateTimeout": deleteOnCreateTimeout,
+		}),
+	}, nil
+}
+
+func HandleCreateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) {
+	needsStarting := callbackCtx.NeedsStarting
+
+	if IsTimeoutExceeded(callbackCtx.StartTime, callbackCtx.TimeoutDuration) {
+		if err := CleanupOnCreateTimeout(context.Background(), atlasClient, callbackCtx); err != nil {
+			return handler.ProgressEvent{
+				OperationStatus: handler.Failed,
+				Message:         fmt.Sprintf("Timeout reached and cleanup failed: %s", err.Error()),
+			}, nil
+		}
+		cleanupMsg := "Timeout reached when waiting for stream processor creation"
+		if callbackCtx.DeleteOnCreateTimeout {
+			cleanupMsg += ". Resource has been deleted because delete_on_create_timeout is true. If you suspect a transient error, wait before retrying to allow resource deletion to finish."
+		} else {
+			cleanupMsg += ". Cleanup was not performed because delete_on_create_timeout is false."
+		}
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         cleanupMsg,
+		}, nil
+	}
+
+	streamProcessor, peErr := GetStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName)
+	if peErr != nil {
+		return *peErr, nil
+	}
+
+	currentState := streamProcessor.GetState()
+
+	callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{
+		"needsStarting":         callbackCtx.NeedsStarting,
+		"startTime":             callbackCtx.StartTime,
+		"timeoutDuration":       callbackCtx.TimeoutDuration,
+		"deleteOnCreateTimeout": callbackCtx.DeleteOnCreateTimeout,
+	})
+
+	switch currentState {
+	case CreatedState:
+		if needsStarting {
+			if peErr := StartStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil {
+				return *peErr, nil
+			}
+			return CreateInProgressEvent("Starting stream processor", currentModel, callbackContext), nil
+		}
+		return FinalizeModel(streamProcessor, currentModel, "Create Completed")
+
+	case StartedState:
+		return FinalizeModel(streamProcessor, currentModel, "Create Completed")
+
+	case InitiatingState, CreatingState:
+		return CreateInProgressEvent(fmt.Sprintf("Creating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil
+
+	case FailedState:
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         "Stream processor entered FAILED state",
+		}, nil
+
+	default:
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         fmt.Sprintf("Unexpected state during creation: %s", currentState),
+		}, nil
+	}
+}
+
+func FinalizeModel(streamProcessor *admin.StreamsProcessorWithStats, currentModel *Model, message string) (handler.ProgressEvent, error) {
+	resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel)
+	if err != nil {
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         fmt.Sprintf("Error converting stream processor model: %s", err.Error()),
+		}, nil
+	}
+
+	CopyIdentifyingFields(resourceModel, currentModel)
+
+	return handler.ProgressEvent{
+		OperationStatus: handler.Success,
+		Message:         message,
+		ResourceModel:   resourceModel,
+	}, nil
+}
+
+func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) {
+	atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ReadRequiredFields)
+	if peErr != nil {
+		return *peErr, nil
+	}
+
+	workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel)
+	if err != nil {
+		return handler.ProgressEvent{
+			OperationStatus: handler.Failed,
+			Message:         err.Error(),
+		}, nil
+	}
+
+	projectID := util.SafeString(currentModel.ProjectId)
+	processorName := util.SafeString(currentModel.ProcessorName)
+
+	streamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(context.Background(),
+		&admin.GetStreamProcessorApiParams{
+			GroupId:       projectID,
+			TenantName:    workspaceOrInstanceName,
+			ProcessorName: processorName,
+		}).Execute()
+	if err != nil {
+		if apiResp != nil && apiResp.StatusCode == http.StatusNotFound {
+			return handler.ProgressEvent{
+				OperationStatus:  handler.Failed,
+				Message:          "Resource not found",
+				HandlerErrorCode: "NotFound",
+			}, nil
+		}
+		return HandleError(apiResp, constants.READ, err)
+	}
+
+	resourceModel, err := GetStreamProcessorModel(streamProcessor, currentModel)
+	if err != nil {
+		return handler.ProgressEvent{
OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + }, nil + } + + CopyIdentifyingFields(resourceModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Read Completed", + ResourceModel: resourceModel, + }, nil +} + +func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, UpdateRequiredFields) + if peErr != nil { + return *peErr, nil + } + + if IsCallback(&req) { + callbackCtx := GetCallbackData(req) + if peErr := ValidateCallbackData(callbackCtx); peErr != nil { + return *peErr, nil + } + return HandleUpdateCallback( + context.Background(), + atlasClient, + currentModel, + callbackCtx, + ) + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + requestParams := &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + } + + currentStreamProcessor, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + }, nil + } + return HandleError(apiResp, constants.READ, err) + } + + currentState := currentStreamProcessor.GetState() + + desiredState := currentState + if currentModel.DesiredState != nil && *currentModel.DesiredState != "" { + desiredState = *currentModel.DesiredState + } else if prevModel != nil && prevModel.DesiredState != nil && *prevModel.DesiredState != "" { + desiredState = *prevModel.DesiredState + } + + if errMsg, isValid := ValidateUpdateStateTransition(currentState, desiredState); !isValid { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: errMsg, + }, nil + } + + if currentState == StartedState { + _, err := atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, + &admin.StopStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), + }, nil + } + + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Stopping stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: DefaultCallbackDelaySeconds, + CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + }, nil + } + + modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating update request: %s", err.Error()), + }, nil + } + + streamProcessorResp, 
apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + if err != nil { + return HandleError(apiResp, constants.UPDATE, err) + } + + if desiredState == StartedState { + _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, + &admin.StartStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), + }, nil + } + + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: "Starting stream processor", + ResourceModel: inProgressModel, + CallbackDelaySeconds: DefaultCallbackDelaySeconds, + CallbackContext: BuildCallbackContext(projectID, workspaceOrInstanceName, processorName, map[string]any{ + "desiredState": desiredState, + }), + }, nil + } + + return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") +} + +func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, ListRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := util.SafeString(currentModel.ProjectId) + + accumulatedProcessors, apiResp, err := getAllStreamProcessors(ctx, atlasClient, projectID, workspaceOrInstanceName) + if err != nil { + return HandleError(apiResp, constants.LIST, err) + } + + response := make([]interface{}, 0, len(accumulatedProcessors)) + for i := range accumulatedProcessors { + model, err := GetStreamProcessorModel(&accumulatedProcessors[i], currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error converting stream processor model: %s", err.Error()), + }, nil + } + + CopyIdentifyingFields(model, currentModel) + response = append(response, model) + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "List Completed", + ResourceModels: response, + }, nil +} + +func getAllStreamProcessors(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsProcessorWithStats, *http.Response, error) { + pageNum := 1 + accumulatedProcessors := make([]admin.StreamsProcessorWithStats, 0) + + for allRecordsRetrieved := false; !allRecordsRetrieved; { + processorsResp, apiResp, err := atlasClient.StreamsApi.GetStreamProcessorsWithParams(ctx, &admin.GetStreamProcessorsApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ItemsPerPage: util.Pointer(constants.DefaultListItemsPerPage), + PageNum: util.Pointer(pageNum), + }).Execute() + + if err != nil { + return nil, apiResp, err + } + + results := processorsResp.GetResults() + accumulatedProcessors = append(accumulatedProcessors, results...) 
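+		// Pagination ends once the accumulated results reach totalCount, or
+		// once the API returns a short page; both conditions are checked below.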
+ + totalCount := processorsResp.GetTotalCount() + allRecordsRetrieved = totalCount <= len(accumulatedProcessors) || len(results) < constants.DefaultListItemsPerPage + pageNum++ + } + + return accumulatedProcessors, nil, nil +} + +func HandleUpdateCallback(ctx context.Context, atlasClient *admin.APIClient, currentModel *Model, callbackCtx *CallbackData) (handler.ProgressEvent, error) { + streamProcessor, peErr := GetStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName) + if peErr != nil { + return *peErr, nil + } + + desiredState := callbackCtx.DesiredState + if desiredState == "" { + desiredState = streamProcessor.GetState() + if desiredState == "" { + if currentModel != nil && currentModel.DesiredState != nil && *currentModel.DesiredState != "" { + desiredState = *currentModel.DesiredState + } else { + desiredState = CreatedState + } + } + } + + currentState := streamProcessor.GetState() + + callbackContext := BuildCallbackContext(callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName, map[string]any{ + "desiredState": desiredState, + }) + + switch currentState { + case StoppedState, CreatedState: + modifyAPIRequestParams, err := NewStreamProcessorUpdateReq(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error creating update request: %s", err.Error()), + }, nil + } + + streamProcessorResp, apiResp, err := atlasClient.StreamsApi.UpdateStreamProcessorWithParams(ctx, modifyAPIRequestParams).Execute() + if err != nil { + return HandleError(apiResp, constants.UPDATE, err) + } + + if desiredState == StartedState { + if peErr := StartStreamProcessor(ctx, atlasClient, callbackCtx.ProjectID, callbackCtx.WorkspaceOrInstanceName, callbackCtx.ProcessorName); peErr != nil { + return *peErr, nil + } + return CreateInProgressEvent("Starting stream processor", currentModel, callbackContext), nil + } + + return FinalizeModel(streamProcessorResp, currentModel, "Update Completed") + + case StartedState: + if desiredState == StartedState { + return FinalizeModel(streamProcessor, currentModel, "Update Completed") + } + + _, err := atlasClient.StreamsApi.StopStreamProcessorWithParams(ctx, + &admin.StopStreamProcessorApiParams{ + GroupId: callbackCtx.ProjectID, + TenantName: callbackCtx.WorkspaceOrInstanceName, + ProcessorName: callbackCtx.ProcessorName, + }, + ).Execute() + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error stopping stream processor: %s", err.Error()), + }, nil + } + return CreateInProgressEvent("Stopping stream processor", currentModel, callbackContext), nil + + case FailedState: + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Stream processor entered FAILED state", + }, nil + + default: + return CreateInProgressEvent(fmt.Sprintf("Updating stream processor (current state: %s)", currentState), currentModel, callbackContext), nil + } +} + +func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { + atlasClient, peErr := InitEnvWithLatestClient(req, currentModel, DeleteRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, err := GetWorkspaceOrInstanceName(currentModel) + if err != nil { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: err.Error(), + }, nil + } + + ctx := context.Background() + projectID := 
util.SafeString(currentModel.ProjectId) + processorName := util.SafeString(currentModel.ProcessorName) + + apiResp, err := atlasClient.StreamsApi.DeleteStreamProcessor(ctx, projectID, workspaceOrInstanceName, processorName).Execute() + if err != nil { + if apiResp != nil && apiResp.StatusCode == http.StatusNotFound { + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Resource not found", + HandlerErrorCode: "NotFound", + }, nil + } + return handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error deleting stream processor: %s", err.Error()), + }, nil + } + + return handler.ProgressEvent{ + OperationStatus: handler.Success, + Message: "Delete Completed", + }, nil +} + +func GetStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) (*admin.StreamsProcessorWithStats, *handler.ProgressEvent) { + requestParams := &admin.GetStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + } + + streamProcessor, resp, err := atlasClient.StreamsApi.GetStreamProcessorWithParams(ctx, requestParams).Execute() + if err != nil { + if resp != nil && resp.StatusCode == http.StatusNotFound { + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Stream processor not found", + HandlerErrorCode: "NotFound", + } + } + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error getting stream processor: %s", err.Error()), + } + } + return streamProcessor, nil +} + +func StartStreamProcessor(ctx context.Context, atlasClient *admin.APIClient, projectID, workspaceOrInstanceName, processorName string) *handler.ProgressEvent { + _, err := atlasClient.StreamsApi.StartStreamProcessorWithParams(ctx, + &admin.StartStreamProcessorApiParams{ + GroupId: projectID, + TenantName: workspaceOrInstanceName, + ProcessorName: processorName, + }, + ).Execute() + if err != nil { + return &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: fmt.Sprintf("Error starting stream processor: %s", err.Error()), + } + } + return nil +} + +func CreateInProgressEvent(message string, currentModel *Model, callbackContext map[string]any) handler.ProgressEvent { + inProgressModel := &Model{} + if currentModel != nil { + *inProgressModel = *currentModel + inProgressModel.DeleteOnCreateTimeout = nil + } + CopyIdentifyingFields(inProgressModel, currentModel) + + return handler.ProgressEvent{ + OperationStatus: handler.InProgress, + Message: message, + ResourceModel: inProgressModel, + CallbackDelaySeconds: DefaultCallbackDelaySeconds, + CallbackContext: callbackContext, + } +} + +func ValidateUpdateStateTransition(currentState, desiredState string) (errMsg string, isValidTransition bool) { + if currentState == desiredState { + return "", true + } + + if desiredState == StoppedState && currentState != StartedState { + return fmt.Sprintf("Stream Processor must be in %s state to transition to %s state", StartedState, StoppedState), false + } + + if desiredState == CreatedState { + return fmt.Sprintf("Stream Processor cannot transition from %s to CREATED", currentState), false + } + + return "", true +} + +func HandleError(response *http.Response, method constants.CfnFunctions, err error) (handler.ProgressEvent, error) { + errMsg := fmt.Sprintf("%s error:%s", method, err.Error()) + + if response != nil && response.StatusCode == http.StatusConflict { + return handler.ProgressEvent{ + 
OperationStatus: handler.Failed, + Message: errMsg, + HandlerErrorCode: "AlreadyExists", + }, nil + } + + return progressevent.GetFailedEventByResponse(errMsg, response), nil +} diff --git a/cfn-resources/stream-processor/cmd/resource/resource_test.go b/cfn-resources/stream-processor/cmd/resource/resource_test.go new file mode 100644 index 000000000..6718bc65d --- /dev/null +++ b/cfn-resources/stream-processor/cmd/resource/resource_test.go @@ -0,0 +1,2336 @@ +// Copyright 2024 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource_test + +import ( + "context" + "fmt" + "net/http" + "testing" + "time" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-processor/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/validator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + admin20250312010 "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" +) + +func TestIsCallback(t *testing.T) { + testCases := map[string]struct { + req handler.Request + expectedResult bool + }{ + "isCallback": { + req: handler.Request{ + CallbackContext: map[string]any{ + "callbackStreamProcessor": true, + }, + }, + expectedResult: true, + }, + "notCallback": { + req: handler.Request{ + CallbackContext: map[string]any{}, + }, + expectedResult: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := resource.IsCallback(&tc.req) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestGetCallbackData(t *testing.T) { + testCases := map[string]struct { + expectedResult *resource.CallbackData + req handler.Request + }{ + "allFieldsPresent": { + req: handler.Request{ + CallbackContext: map[string]any{ + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "needsStarting": true, + "desiredState": "STARTED", + "startTime": "2024-01-01T00:00:00Z", + "timeoutDuration": "20m", + "deleteOnCreateTimeout": true, + }, + }, + expectedResult: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: true, + DesiredState: "STARTED", + StartTime: "2024-01-01T00:00:00Z", + TimeoutDuration: "20m", + DeleteOnCreateTimeout: true, + }, + }, + "partialFields": { + req: handler.Request{ + CallbackContext: map[string]any{ + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + }, + }, + expectedResult: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + }, + "emptyContext": { + req: 
handler.Request{ + CallbackContext: map[string]any{}, + }, + expectedResult: &resource.CallbackData{}, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := resource.GetCallbackData(tc.req) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestValidateCallbackData(t *testing.T) { + testCases := map[string]struct { + callbackCtx *resource.CallbackData + expectedMsgContain string + expectedError bool + }{ + "valid": { + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + expectedError: false, + }, + "missingProjectID": { + callbackCtx: &resource.CallbackData{ + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + "missingWorkspaceName": { + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + ProcessorName: "processor-1", + }, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + "missingProcessorName": { + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + }, + expectedError: true, + expectedMsgContain: "Missing required values", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + peErr := resource.ValidateCallbackData(tc.callbackCtx) + if tc.expectedError { + require.NotNil(t, peErr) + assert.Contains(t, peErr.Message, tc.expectedMsgContain) + } else { + require.Nil(t, peErr) + } + }) + } +} + +func TestCopyIdentifyingFields(t *testing.T) { + testCases := map[string]struct { + resourceModel *resource.Model + currentModel *resource.Model + validateFunc func(t *testing.T, resourceModel *resource.Model) + }{ + "withWorkspaceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Equal(t, "default", util.SafeString(rm.Profile)) + assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) + assert.Equal(t, "processor-1", util.SafeString(rm.ProcessorName)) + assert.Equal(t, "workspace-1", util.SafeString(rm.WorkspaceName)) + // Primary identifier requires both fields - InstanceName should be set from WorkspaceName + assert.Equal(t, "workspace-1", util.SafeString(rm.InstanceName)) + }, + }, + "withInstanceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + InstanceName: util.StringPtr("instance-1"), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Equal(t, "default", util.SafeString(rm.Profile)) + assert.Equal(t, "507f1f77bcf86cd799439011", util.SafeString(rm.ProjectId)) + assert.Equal(t, "processor-1", util.SafeString(rm.ProcessorName)) + assert.Equal(t, "instance-1", util.SafeString(rm.InstanceName)) + // Primary identifier requires both fields - WorkspaceName should be set from InstanceName + assert.Equal(t, "instance-1", util.SafeString(rm.WorkspaceName)) + }, + }, + "emptyWorkspaceName": { + resourceModel: &resource.Model{}, + currentModel: &resource.Model{ + Profile: 
util.StringPtr("default"), + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr(""), + }, + validateFunc: func(t *testing.T, rm *resource.Model) { + t.Helper() + assert.Nil(t, rm.WorkspaceName) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + resource.CopyIdentifyingFields(tc.resourceModel, tc.currentModel) + if tc.validateFunc != nil { + tc.validateFunc(t, tc.resourceModel) + } + }) + } +} + +func TestBuildCallbackContext(t *testing.T) { + testCases := map[string]struct { + additionalFields map[string]any + validateFunc func(t *testing.T, ctx map[string]any) + projectID string + workspaceOrInstanceName string + processorName string + }{ + "basic": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + additionalFields: map[string]any{}, + validateFunc: func(t *testing.T, ctx map[string]any) { + t.Helper() + assert.True(t, ctx["callbackStreamProcessor"].(bool)) + assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) + assert.Equal(t, "workspace-1", ctx["workspaceName"]) + assert.Equal(t, "processor-1", ctx["processorName"]) + }, + }, + "withAdditionalFields": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + additionalFields: map[string]any{ + "needsStarting": true, + "desiredState": "STARTED", + }, + validateFunc: func(t *testing.T, ctx map[string]any) { + t.Helper() + assert.True(t, ctx["callbackStreamProcessor"].(bool)) + assert.Equal(t, "507f1f77bcf86cd799439011", ctx["projectID"]) + assert.True(t, ctx["needsStarting"].(bool)) + assert.Equal(t, "STARTED", ctx["desiredState"]) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + ctx := resource.BuildCallbackContext(tc.projectID, tc.workspaceOrInstanceName, tc.processorName, tc.additionalFields) + if tc.validateFunc != nil { + tc.validateFunc(t, ctx) + } + }) + } +} + +func TestParseTimeout(t *testing.T) { + testCases := map[string]struct { + timeoutStr string + expectedResult time.Duration + }{ + "validDuration": { + timeoutStr: "20m", + expectedResult: 20 * time.Minute, + }, + "validSeconds": { + timeoutStr: "30s", + expectedResult: 30 * time.Second, + }, + "emptyString": { + timeoutStr: "", + expectedResult: resource.DefaultCreateTimeout, + }, + "invalidFormat": { + timeoutStr: "invalid", + expectedResult: resource.DefaultCreateTimeout, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + result := resource.ParseTimeout(tc.timeoutStr) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestIsTimeoutExceeded(t *testing.T) { + testCases := map[string]struct { + startTimeStr string + timeoutDurationStr string + expectedResult bool + }{ + "timeoutExceeded": { + startTimeStr: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), + timeoutDurationStr: "20m", + expectedResult: true, + }, + "timeoutNotExceeded": { + startTimeStr: time.Now().Add(-10 * time.Minute).Format(time.RFC3339), + timeoutDurationStr: "20m", + expectedResult: false, + }, + "emptyStartTime": { + startTimeStr: "", + timeoutDurationStr: "20m", + expectedResult: false, + }, + "emptyTimeoutDuration": { + startTimeStr: time.Now().Format(time.RFC3339), + timeoutDurationStr: "", + expectedResult: false, + }, + "invalidStartTime": { + startTimeStr: "invalid", + timeoutDurationStr: "20m", + expectedResult: false, + }, + } + + for name, tc 
:= range testCases { + t.Run(name, func(t *testing.T) { + result := resource.IsTimeoutExceeded(tc.startTimeStr, tc.timeoutDurationStr) + assert.Equal(t, tc.expectedResult, result) + }) + } +} + +func TestCreateInProgressEvent(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + callbackContext map[string]any + validateFunc func(t *testing.T, event handler.ProgressEvent) + message string + }{ + "basic": { + message: "Creating stream processor", + currentModel: &resource.Model{ProjectId: util.StringPtr("507f1f77bcf86cd799439011")}, + callbackContext: map[string]any{ + "projectID": "507f1f77bcf86cd799439011", + }, + validateFunc: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, handler.InProgress, event.OperationStatus) + assert.Equal(t, "Creating stream processor", event.Message) + assert.Equal(t, int64(resource.DefaultCallbackDelaySeconds), event.CallbackDelaySeconds) + assert.NotNil(t, event.CallbackContext) + }, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + event := resource.CreateInProgressEvent(tc.message, tc.currentModel, tc.callbackContext) + if tc.validateFunc != nil { + tc.validateFunc(t, event) + } + }) + } +} + +func TestValidateUpdateStateTransition(t *testing.T) { + testCases := map[string]struct { + currentState string + desiredState string + expectedErrMsg string + expectedIsValid bool + }{ + "validCREATEDtoSTARTED": { + currentState: resource.CreatedState, + desiredState: resource.StartedState, + expectedIsValid: true, + }, + "invalidSTARTEDtoCREATED": { + currentState: resource.StartedState, + desiredState: resource.CreatedState, + expectedIsValid: false, + expectedErrMsg: "cannot transition from STARTED to CREATED", + }, + "validSTARTEDtoSTOPPED": { + currentState: resource.StartedState, + desiredState: resource.StoppedState, + expectedIsValid: true, + }, + "invalidCREATEDtoSTOPPED": { + currentState: resource.CreatedState, + desiredState: resource.StoppedState, + expectedIsValid: false, + expectedErrMsg: "must be in STARTED state", + }, + "sameState": { + currentState: resource.CreatedState, + desiredState: resource.CreatedState, + expectedIsValid: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + errMsg, isValid := resource.ValidateUpdateStateTransition(tc.currentState, tc.desiredState) + assert.Equal(t, tc.expectedIsValid, isValid) + if !tc.expectedIsValid { + assert.Contains(t, errMsg, tc.expectedErrMsg) + } + }) + } +} + +func TestList(t *testing.T) { + // Save original function + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + testCases := map[string]struct { + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + req handler.Request + expectedCount int + }{ + "successfulListSinglePage": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + processors := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin20250312010.StreamsProcessorWithStats{ + { + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + 
State: resource.CreatedState, + }, + { + Name: "processor-2", + Id: "507f1f77bcf86cd799439012", + State: resource.StartedState, + }, + }, + TotalCount: util.Pointer(2), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + expectedCount: 2, + }, + "successfulListMultiplePages": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + // First page - returns full page (100 items) but totalCount is 101, so we need to fetch page 2 + // Create 100 processors for first page + firstPageResults := make([]admin20250312010.StreamsProcessorWithStats, 100) + for i := 0; i < 100; i++ { + firstPageResults[i] = admin20250312010.StreamsProcessorWithStats{ + Name: fmt.Sprintf("processor-%d", i+1), + Id: fmt.Sprintf("507f1f77bcf86cd79943%03d", i), + State: resource.CreatedState, + } + } + req1 := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.MatchedBy(func(params *admin20250312010.GetStreamProcessorsApiParams) bool { + return params.PageNum != nil && *params.PageNum == 1 + })).Return(req1).Once() + processors1 := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &firstPageResults, + TotalCount: util.Pointer(101), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors1, &http.Response{StatusCode: 200}, nil).Once() + + // Second page - returns 1 remaining item + req2 := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.MatchedBy(func(params *admin20250312010.GetStreamProcessorsApiParams) bool { + return params.PageNum != nil && *params.PageNum == 2 + })).Return(req2).Once() + processors2 := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin20250312010.StreamsProcessorWithStats{ + { + Name: "processor-101", + Id: "507f1f77bcf86cd799439101", + State: resource.StartedState, + }, + }, + TotalCount: util.Pointer(101), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors2, &http.Response{StatusCode: 200}, nil).Once() + }, + expectedStatus: handler.Success, + expectedCount: 101, + }, + "successfulListEmpty": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + processors := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin20250312010.StreamsProcessorWithStats{}, + TotalCount: util.Pointer(0), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + expectedCount: 0, + }, + "listWithInstanceName": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + InstanceName: util.StringPtr("instance-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := 
admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + processors := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin20250312010.StreamsProcessorWithStats{ + { + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + }, + }, + TotalCount: util.Pointer(1), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + expectedCount: 1, + }, + "listWithApiError": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("server error")) + }, + expectedStatus: handler.Failed, + expectedCount: 0, + }, + "listWithMissingWorkspaceAndInstance": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + expectedCount: 0, + }, + "listWithPipelineAndOptions": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorsApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorsWithParams(mock.Anything, mock.Anything).Return(req) + pipeline := []any{ + map[string]any{"$match": map[string]any{"status": "active"}}, + } + stats := map[string]any{ + "processed": 100, + "errors": 0, + } + processors := &admin20250312010.PaginatedApiStreamsStreamProcessorWithStats{ + Results: &[]admin20250312010.StreamsProcessorWithStats{ + { + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + Pipeline: pipeline, + Stats: stats, + Options: &admin20250312010.StreamsOptions{ + Dlq: &admin20250312010.StreamsDLQ{ + Coll: util.StringPtr("dlq_collection"), + ConnectionName: util.StringPtr("connection-1"), + Db: util.StringPtr("dlq_db"), + }, + }, + }, + }, + TotalCount: util.Pointer(1), + } + m.EXPECT().GetStreamProcessorsExecute(mock.Anything).Return(processors, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + expectedCount: 1, + }, + "listValidationError": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + expectedCount: 0, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // For validation error test cases, validation should fail before API calls + if name == "listValidationError" || name == "listWithMissingWorkspaceAndInstance" { + // Mock initEnvWithLatestClient to run real validation but return error + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel 
*resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + // Run validation - this should fail for these test cases + if errEvent := validator.ValidateModel(requiredFields, currentModel); errEvent != nil { + return nil, errEvent + } + // If validation passes (shouldn't happen for these cases), return nil client + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "unexpected validation success", + } + } + // Validation will fail and return early, so no API calls should be made + event, err := resource.List(tc.req, nil, tc.currentModel) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + return + } + + // Mock initEnvWithLatestClient for other test cases + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.List(tc.req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + + if tc.expectedStatus == handler.Success { + require.NotNil(t, event.ResourceModels) + assert.Len(t, event.ResourceModels, tc.expectedCount) + + // Verify that each model has identifying fields set + for i, rm := range event.ResourceModels { + model, ok := rm.(*resource.Model) + require.True(t, ok, "ResourceModel[%d] should be *Model", i) + assert.Equal(t, tc.currentModel.ProjectId, model.ProjectId) + if tc.currentModel.WorkspaceName != nil { + assert.Equal(t, tc.currentModel.WorkspaceName, model.WorkspaceName) + } + if tc.currentModel.InstanceName != nil { + assert.Equal(t, tc.currentModel.InstanceName, model.InstanceName) + } + } + } + }) + } +} + +func TestHandleError(t *testing.T) { + testCases := map[string]struct { + response *http.Response + method constants.CfnFunctions + err error + expectedStatus handler.Status + expectedErrorCode string + expectedMsgContain string + }{ + "conflictError": { + response: &http.Response{ + StatusCode: http.StatusConflict, + }, + method: constants.CREATE, + err: fmt.Errorf("resource already exists"), + expectedStatus: handler.Failed, + expectedErrorCode: "AlreadyExists", + expectedMsgContain: "CREATE error:resource already exists", + }, + "otherError": { + response: &http.Response{ + StatusCode: http.StatusBadRequest, + }, + method: constants.UPDATE, + err: fmt.Errorf("invalid request"), + expectedStatus: handler.Failed, + expectedErrorCode: "", + expectedMsgContain: "UPDATE error:invalid request", + }, + "nilResponse": { + response: nil, + method: constants.DELETE, + err: fmt.Errorf("network error"), + expectedStatus: handler.Failed, + expectedErrorCode: "", + expectedMsgContain: "DELETE error:network error", + }, + "notFoundError": { + response: &http.Response{ + StatusCode: http.StatusNotFound, + }, + method: constants.READ, + err: fmt.Errorf("resource not found"), + expectedStatus: handler.Failed, + expectedErrorCode: "", + expectedMsgContain: "READ error:resource not found", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + event, err := resource.HandleError(tc.response, tc.method, tc.err) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsgContain) + if 
tc.expectedErrorCode != "" { + assert.Equal(t, tc.expectedErrorCode, event.HandlerErrorCode) + } + }) + } +} + +func TestFinalizeModel(t *testing.T) { + testCases := map[string]struct { + streamProcessor *admin20250312010.StreamsProcessorWithStats + currentModel *resource.Model + message string + expectedStatus handler.Status + expectedError bool + }{ + "successfulFinalize": { + streamProcessor: &admin20250312010.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "CREATED", + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + }, + message: "Create Complete", + expectedStatus: handler.Success, + expectedError: false, + }, + "withPipeline": { + streamProcessor: func() *admin20250312010.StreamsProcessorWithStats { + pipeline := []any{ + map[string]any{"$match": map[string]any{"status": "active"}}, + } + return &admin20250312010.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "STARTED", + Pipeline: pipeline, + } + }(), + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + }, + message: "Update Complete", + expectedStatus: handler.Success, + expectedError: false, + }, + "withStats": { + streamProcessor: func() *admin20250312010.StreamsProcessorWithStats { + stats := map[string]any{ + "processed": 100, + "errors": 0, + } + return &admin20250312010.StreamsProcessorWithStats{ + Name: "test-processor", + Id: "507f1f77bcf86cd799439011", + State: "CREATED", + Stats: stats, + } + }(), + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("test-processor"), + }, + message: "Read Complete", + expectedStatus: handler.Success, + expectedError: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + event, err := resource.FinalizeModel(tc.streamProcessor, tc.currentModel, tc.message) + + if tc.expectedError { + require.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Equal(t, tc.message, event.Message) + require.NotNil(t, event.ResourceModel) + resourceModel, ok := event.ResourceModel.(*resource.Model) + require.True(t, ok, "ResourceModel should be *Model") + assert.Equal(t, tc.currentModel.ProjectId, resourceModel.ProjectId) + assert.Equal(t, tc.currentModel.ProcessorName, resourceModel.ProcessorName) + } + }) + } +} + +func TestCleanupOnCreateTimeout(t *testing.T) { + testCases := map[string]struct { + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedNoAPICall bool + }{ + "deleteOnCreateTimeoutFalse": { + callbackCtx: &resource.CallbackData{ + DeleteOnCreateTimeout: false, + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedNoAPICall: true, + }, + "deleteOnCreateTimeoutTrue": { + callbackCtx: &resource.CallbackData{ + DeleteOnCreateTimeout: true, + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + 
m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedNoAPICall: false, + }, + "deleteOnCreateTimeoutTrueWithError": { + callbackCtx: &resource.CallbackData{ + DeleteOnCreateTimeout: true, + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("delete failed")) + }, + expectedNoAPICall: false, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + err := resource.CleanupOnCreateTimeout(context.Background(), mockClient, tc.callbackCtx) + assert.NoError(t, err) + }) + } +} + +func TestSetup(t *testing.T) { + assert.NotPanics(t, func() { + resource.Setup() + }) +} + +func TestGetStreamProcessor(t *testing.T) { + testCases := map[string]struct { + mockSetup func(*mockadmin.StreamsApi) + projectID string + workspaceOrInstanceName string + processorName string + expectedState string + expectedError bool + }{ + "success": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: "CREATED", + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedError: false, + expectedState: "CREATED", + }, + "notFound": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedError: true, + }, + "apiError": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("server error")) + }, + expectedError: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + processor, peErr := resource.GetStreamProcessor(context.Background(), mockClient, tc.projectID, tc.workspaceOrInstanceName, tc.processorName) + + if tc.expectedError { + require.NotNil(t, peErr) + assert.Nil(t, processor) + } else { + require.Nil(t, peErr) + 
require.NotNil(t, processor) + assert.Equal(t, tc.expectedState, processor.GetState()) + } + }) + } +} + +func TestStartStreamProcessor(t *testing.T) { + testCases := map[string]struct { + mockSetup func(*mockadmin.StreamsApi) + projectID string + workspaceOrInstanceName string + processorName string + expectedError bool + }{ + "success": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedError: false, + }, + "apiError": { + projectID: "507f1f77bcf86cd799439011", + workspaceOrInstanceName: "workspace-1", + processorName: "processor-1", + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("start failed")) + }, + expectedError: true, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + peErr := resource.StartStreamProcessor(context.Background(), mockClient, tc.projectID, tc.workspaceOrInstanceName, tc.processorName) + + if tc.expectedError { + require.NotNil(t, peErr) + } else { + require.Nil(t, peErr) + } + }) + } +} + +func TestHandleCreateCallback(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + }{ + "createdStateNoStart": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "createdStateNeedsStart": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: true, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := 
admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1).Once() + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil).Once() + + startReq := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.InProgress, + }, + "startedState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StartedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "failedState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.FailedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Failed, + }, + "initiatingState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: 
resource.InitiatingState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "creatingState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatingState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "timeoutExceeded": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "timeoutExceededWithCleanup": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: true, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m}) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.Failed, + }, + "timeoutWithCleanupError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Add(-25 * time.Minute).Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: true, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("delete failed")) + }, + expectedStatus: 
handler.Failed, + }, + "unexpectedState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: "UNEXPECTED_STATE", + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Failed, + }, + "getStreamProcessorError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: false, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("server error")) + }, + expectedStatus: handler.Failed, + }, + "startProcessorError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + NeedsStarting: true, + StartTime: time.Now().Format(time.RFC3339), + TimeoutDuration: "20m", + DeleteOnCreateTimeout: false, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1).Once() + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil).Once() + + startReq := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("start failed")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + event, err := resource.HandleCreateCallback(context.Background(), mockClient, tc.currentModel, tc.callbackCtx) 
+ + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} + +//nolint:funlen // Test cases map is necessarily long +func getTestHandleUpdateCallbackCases() map[string]struct { + currentModel *resource.Model + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status +} { + return map[string]struct { + currentModel *resource.Model + callbackCtx *resource.CallbackData + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + }{ + "stoppedStateUpdate": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "startedStateNeedsStop": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.StoppedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StartedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + stopReq := admin20250312010.StopStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) + m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.InProgress, + }, + "startedStateSameDesiredState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + 
ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.StartedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StartedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + // When state is already STARTED and planned state is STARTED, code just finalizes without calling Update + }, + expectedStatus: handler.Success, + }, + "stoppedStateUpdateAndStart": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.StartedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + + startReq := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.InProgress, + }, + "failedState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.FailedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Failed, + }, + "defaultTransitioningState": { + currentModel: &resource.Model{ + ProjectId: 
util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: "STOPPING", + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "emptyDesiredState": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: "", + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "updateError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, 
fmt.Errorf("update failed")) + }, + expectedStatus: handler.Failed, + }, + "stopError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.StoppedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StartedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + stopReq := admin20250312010.StopStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StopStreamProcessorWithParams(mock.Anything, mock.Anything).Return(stopReq) + m.EXPECT().StopStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("stop failed")) + }, + expectedStatus: handler.Failed, + }, + "startAfterUpdateError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.StartedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + + startReq := admin20250312010.StartStreamProcessorApiRequest{ApiService: m} + m.EXPECT().StartStreamProcessorWithParams(mock.Anything, mock.Anything).Return(startReq) + m.EXPECT().StartStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("start failed")) + }, + expectedStatus: handler.Failed, + }, + "getStreamProcessorErrorInUpdate": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m 
*mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("get failed")) + }, + expectedStatus: handler.Failed, + }, + "updateRequestError": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`invalid json`), // Invalid JSON will cause error + }, + callbackCtx: &resource.CallbackData{ + ProjectID: "507f1f77bcf86cd799439011", + WorkspaceOrInstanceName: "workspace-1", + ProcessorName: "processor-1", + DesiredState: resource.CreatedState, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.StoppedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Failed, + }, + } +} + +func TestHandleUpdateCallback(t *testing.T) { + testCases := getTestHandleUpdateCallbackCases() + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + event, err := resource.HandleUpdateCallback(context.Background(), mockClient, tc.currentModel, tc.callbackCtx) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} + +// TestCreateValidationErrors tests validation paths in Create handler +func TestCreateValidationErrors(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + expectedStatus handler.Status + expectedMsg string + }{ + "missingProjectId": { + currentModel: &resource.Model{ + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + "missingProcessorName": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + req := handler.Request{ + RequestContext: handler.RequestContext{}, + } + event, err := resource.Create(req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +// TestReadValidationErrors tests validation paths in Read handler +func TestReadValidationErrors(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + expectedStatus handler.Status + expectedMsg string + }{ + "missingProjectId": { + currentModel: &resource.Model{ + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + "missingProcessorName": { + currentModel: &resource.Model{ + ProjectId: 
util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + req := handler.Request{ + RequestContext: handler.RequestContext{}, + } + event, err := resource.Read(req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +// TestUpdateValidationErrors tests validation paths in Update handler +func TestUpdateValidationErrors(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + expectedStatus handler.Status + expectedMsg string + }{ + "missingProjectId": { + currentModel: &resource.Model{ + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + "missingProcessorName": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + req := handler.Request{ + RequestContext: handler.RequestContext{}, + } + event, err := resource.Update(req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +// TestDeleteValidationErrors tests validation paths in Delete handler +func TestDeleteValidationErrors(t *testing.T) { + testCases := map[string]struct { + currentModel *resource.Model + expectedStatus handler.Status + expectedMsg string + }{ + "missingProjectId": { + currentModel: &resource.Model{ + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + "missingProcessorName": { + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + expectedStatus: handler.Failed, + expectedMsg: "required", + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + req := handler.Request{ + RequestContext: handler.RequestContext{}, + } + event, err := resource.Delete(req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +// TestCreateWithMocks tests the Create handler with mocked client initialization +func TestCreateWithMocks(t *testing.T) { + // Save original function + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + testCases := map[string]struct { + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + req handler.Request + }{ + "successfulCreate": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + DesiredState: util.StringPtr(resource.CreatedState), + }, + mockSetup: func(m 
*mockadmin.StreamsApi) { + req := admin20250312010.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "createWithStateStarted": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + DesiredState: util.StringPtr(resource.StartedState), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.InProgress, + }, + "createWithCallback": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + CallbackContext: map[string]any{ + "callbackStreamProcessor": true, + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "needsStarting": false, + "startTime": time.Now().Format(time.RFC3339), + "timeoutDuration": "20m", + "deleteOnCreateTimeout": false, + }, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "createWithInvalidState": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + DesiredState: util.StringPtr("INVALID_STATE"), + }, + mockSetup: func(m *mockadmin.StreamsApi) {}, + expectedStatus: handler.Failed, + }, + "createWithApiError": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.CreateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().CreateStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().CreateStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("API error")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range 
testCases { + t.Run(name, func(t *testing.T) { + // Mock initEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.Create(tc.req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} + +// TestReadWithMocks tests the Read handler with mocked client initialization +func TestReadWithMocks(t *testing.T) { + // Save original function + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + testCases := map[string]struct { + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + req handler.Request + }{ + "successfulRead": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + pipeline := []any{ + map[string]any{"$match": map[string]any{"status": "active"}}, + } + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + Pipeline: pipeline, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "readNotFound": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Mock initEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.Read(tc.req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} + +// TestUpdateWithMocks tests the Update handler with mocked client initialization +func TestUpdateWithMocks(t *testing.T) { + // Save original function + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + 
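+	// Each subtest swaps resource.InitEnvWithLatestClient for a stub that hands
+	// back a pre-wired mock APIClient, so the Update handler never reads real
+	// credentials or makes network calls.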
testCases := map[string]struct { + prevModel *resource.Model + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + req handler.Request + }{ + "successfulUpdate": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + prevModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + DesiredState: util.StringPtr(resource.CreatedState), + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + DesiredState: util.StringPtr(resource.CreatedState), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + // Get current state + req1 := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req1) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + // Update + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "updateWithCallback": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + CallbackContext: map[string]any{ + "callbackStreamProcessor": true, + "projectID": "507f1f77bcf86cd799439011", + "workspaceName": "workspace-1", + "processorName": "processor-1", + "desiredState": resource.CreatedState, + }, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + Pipeline: util.StringPtr(`[{"$match": {"status": "active"}}]`), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.GetStreamProcessorApiRequest{ApiService: m} + m.EXPECT().GetStreamProcessorWithParams(mock.Anything, mock.Anything).Return(req) + processor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().GetStreamProcessorExecute(mock.Anything).Return(processor, &http.Response{StatusCode: 200}, nil) + + updateReq := admin20250312010.UpdateStreamProcessorApiRequest{ApiService: m} + m.EXPECT().UpdateStreamProcessorWithParams(mock.Anything, mock.Anything).Return(updateReq) + updatedProcessor := &admin20250312010.StreamsProcessorWithStats{ + Name: "processor-1", + Id: "507f1f77bcf86cd799439011", + State: resource.CreatedState, + } + m.EXPECT().UpdateStreamProcessorExecute(mock.Anything).Return(updatedProcessor, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Mock initEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + 
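+			// Register this case's expected Streams API calls on the generated
+			// mock before wiring it into the API client.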
tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.Update(tc.req, tc.prevModel, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} + +// TestDeleteWithMocks tests the Delete handler with mocked client initialization +func TestDeleteWithMocks(t *testing.T) { + // Save original function + originalInitEnv := resource.InitEnvWithLatestClient + defer func() { + resource.InitEnvWithLatestClient = originalInitEnv + }() + + testCases := map[string]struct { + currentModel *resource.Model + mockSetup func(*mockadmin.StreamsApi) + expectedStatus handler.Status + req handler.Request + }{ + "successfulDelete": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, nil) + }, + expectedStatus: handler.Success, + }, + "deleteWithError": { + req: handler.Request{ + RequestContext: handler.RequestContext{}, + }, + currentModel: &resource.Model{ + ProjectId: util.StringPtr("507f1f77bcf86cd799439011"), + ProcessorName: util.StringPtr("processor-1"), + WorkspaceName: util.StringPtr("workspace-1"), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + req := admin20250312010.DeleteStreamProcessorApiRequest{ApiService: m} + m.EXPECT().DeleteStreamProcessor(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(req) + m.EXPECT().DeleteStreamProcessorExecute(mock.Anything).Return(nil, fmt.Errorf("delete failed")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + // Mock initEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + tc.mockSetup(mockStreamsAPI) + + mockClient := &admin20250312010.APIClient{} + mockClient.StreamsApi = mockStreamsAPI + + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin20250312010.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + + event, err := resource.Delete(tc.req, nil, tc.currentModel) + + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + }) + } +} diff --git a/cfn-resources/stream-processor/docs/README.md b/cfn-resources/stream-processor/docs/README.md new file mode 100644 index 000000000..3cae09fda --- /dev/null +++ b/cfn-resources/stream-processor/docs/README.md @@ -0,0 +1,179 @@ +# MongoDB::Atlas::StreamProcessor + +Returns, adds, edits, and removes Atlas Stream Processors. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Type" : "MongoDB::Atlas::StreamProcessor",
+    "Properties" : {
+        "Profile" : String,
+        "ProjectId" : String,
+        "InstanceName" : String,
+        "WorkspaceName" : String,
+        "ProcessorName" : String,
+        "Pipeline" : String,
+        "DesiredState" : String,
+        "Options" : StreamsOptions,
+        "Timeouts" : Timeouts,
+        "DeleteOnCreateTimeout" : Boolean
+    }
+}
+
+
+### YAML
+
+Type: MongoDB::Atlas::StreamProcessor
+Properties:
+    Profile: String
+    ProjectId: String
+    InstanceName: String
+    WorkspaceName: String
+    ProcessorName: String
+    Pipeline: String
+    DesiredState: String
+    Options: StreamsOptions
+    Timeouts: Timeouts
+    DeleteOnCreateTimeout: Boolean
+
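+As an illustrative sketch (the project ID and the workspace, connection, database, and collection names below are placeholders, not defaults), a minimal declaration passes `Pipeline` as a JSON-encoded string:
+
+```yaml
+Type: MongoDB::Atlas::StreamProcessor
+Properties:
+    ProjectId: "507f1f77bcf86cd799439011"
+    WorkspaceName: my-workspace
+    ProcessorName: solar-processor
+    DesiredState: STARTED
+    Pipeline: >-
+        [{"$source": {"connectionName": "sample_stream_solar"}},
+        {"$merge": {"into": {"connectionName": "my-cluster-connection", "db": "test", "coll": "output"}}}]
+```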
+
+## Properties
+
+#### Profile
+
+Profile used to provide credentials information (a secret named cfn/atlas/profile/{Profile} is required); if not provided, the default profile is used.
+
+_Required_: No
+
+_Type_: String
+
+_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)
+
+#### ProjectId
+
+Unique 24-hexadecimal digit string that identifies your project.
+
+**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups.
+
+_Required_: Yes
+
+_Type_: String
+
+_Minimum Length_: 24
+
+_Maximum Length_: 24
+
+_Pattern_: ^([a-f0-9]{24})$
+
+_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)
+
+#### InstanceName
+
+Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided.
+
+_Required_: No
+
+_Type_: String
+
+_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)
+
+#### WorkspaceName
+
+Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided.
+
+_Required_: No
+
+_Type_: String
+
+_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)
+
+#### ProcessorName
+
+Label that identifies the stream processor.
+
+_Required_: Yes
+
+_Type_: String
+
+_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement)
+
+#### Pipeline
+
+Stream aggregation pipeline you want to apply to your streaming data. This should be a JSON-encoded array of pipeline stages. Refer to the MongoDB Atlas Docs for more information on stream aggregation pipelines.
+
+_Required_: Yes
+
+_Type_: String
+
+_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt)
+
+#### DesiredState
+
+The desired state of the stream processor. Used to start or stop the Stream Processor. Valid values are CREATED, STARTED, or STOPPED. When a Stream Processor is created without specifying the desired state, it defaults to the CREATED state. When a Stream Processor is updated without specifying the desired state, it defaults to the previous state.
+
+**NOTE**: When a Stream Processor is updated without specifying the desired state, it is stopped and then restored to its previous state upon update completion.
+
+_Required_: No
+
+_Type_: String
+
+_Allowed Values_: CREATED | STARTED | STOPPED
+
+_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt)
+
+#### Options
+
+Optional configuration for the stream processor.
+ +_Required_: No + +_Type_: StreamsOptions + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Timeouts + +Configurable timeouts for stream processor operations. + +_Required_: No + +_Type_: Timeouts + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### DeleteOnCreateTimeout + +Indicates whether to delete the resource being created if a timeout is reached when waiting for completion. When set to `true` and timeout occurs, it triggers the deletion and returns immediately without waiting for deletion to complete. When set to `false`, the timeout will not trigger resource deletion. If you suspect a transient error when the value is `true`, wait before retrying to allow resource deletion to finish. Default is `true`. + +_Required_: No + +_Type_: Boolean + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +## Return Values + +### Fn::GetAtt + +The `Fn::GetAtt` intrinsic function returns a value for a specified attribute of this type. The following are the available attributes and sample return values. + +For more information about using the `Fn::GetAtt` intrinsic function, see [Fn::GetAtt](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/intrinsic-function-reference-getatt.html). + +#### Id + +Unique 24-hexadecimal character string that identifies the stream processor. + +#### Stats + +The stats associated with the stream processor as a JSON string. Refer to the MongoDB Atlas Docs for more information. + +#### State + +The actual current state of the stream processor as returned by the Atlas API. Commonly occurring states are 'CREATED', 'STARTED', 'STOPPED' and 'FAILED'. This is a read-only property that reflects the real-time state of the processor. + diff --git a/cfn-resources/stream-processor/docs/streamsdlq.md b/cfn-resources/stream-processor/docs/streamsdlq.md new file mode 100644 index 000000000..e99782f57 --- /dev/null +++ b/cfn-resources/stream-processor/docs/streamsdlq.md @@ -0,0 +1,58 @@ +# MongoDB::Atlas::StreamProcessor StreamsDLQ + +Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Coll" : String,
+    "ConnectionName" : String,
+    "Db" : String
+}
+
+
+### YAML
+
+Coll: String
+ConnectionName: String
+Db: String
+
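+For example, a DLQ that routes failed documents to a dedicated namespace (the connection, database, and collection names below are placeholders) could be declared as:
+
+```json
+{
+    "Coll" : "dlq-messages",
+    "ConnectionName" : "my-cluster-connection",
+    "Db" : "dlq"
+}
+```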
+ +## Properties + +#### Coll + +Name of the collection to use for the DLQ. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ConnectionName + +Name of the connection to write DLQ messages to. Must be an Atlas connection. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Db + +Name of the database to use for the DLQ. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/docs/streamsoptions.md b/cfn-resources/stream-processor/docs/streamsoptions.md new file mode 100644 index 000000000..015dd98f3 --- /dev/null +++ b/cfn-resources/stream-processor/docs/streamsoptions.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamProcessor StreamsOptions + +Optional configuration for the stream processor. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Dlq" : StreamsDLQ
+}
+
+
+### YAML
+
+Dlq: StreamsDLQ
+
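+As a sketch (connection and namespace values are placeholders), an Options block that enables a dead letter queue might look like:
+
+```json
+"Options" : {
+    "Dlq" : {
+        "Coll" : "dlq-messages",
+        "ConnectionName" : "my-cluster-connection",
+        "Db" : "dlq"
+    }
+}
+```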
+ +## Properties + +#### Dlq + +Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information. + +_Required_: Yes + +_Type_: StreamsDLQ + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/docs/timeouts.md b/cfn-resources/stream-processor/docs/timeouts.md new file mode 100644 index 000000000..08c397cd8 --- /dev/null +++ b/cfn-resources/stream-processor/docs/timeouts.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamProcessor Timeouts + +Configurable timeouts for stream processor operations. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Create" : String
+}
+
+
+### YAML
+
+Create: String
+
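+For example, to allow up to 30 minutes for a processor that starts slowly (any Go duration string, such as '90s' or '1h30m', is accepted):
+
+```json
+"Timeouts" : {
+    "Create" : "30m"
+}
+```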
+ +## Properties + +#### Create + +Timeout for create operation in Go duration format (e.g., '5m', '10s'). Default is 20 minutes. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json new file mode 100644 index 000000000..6f9778809 --- /dev/null +++ b/cfn-resources/stream-processor/mongodb-atlas-streamprocessor.json @@ -0,0 +1,147 @@ +{ + "typeName": "MongoDB::Atlas::StreamProcessor", + "description": "Returns, adds, edits, and removes Atlas Stream Processors.", + "sourceUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources.git", + "definitions": { + "StreamsDLQ": { + "type": "object", + "description": "Dead letter queue for the stream processor. Refer to the MongoDB Atlas Docs for more information.", + "properties": { + "Coll": { + "type": "string", + "description": "Name of the collection to use for the DLQ." + }, + "ConnectionName": { + "type": "string", + "description": "Name of the connection to write DLQ messages to. Must be an Atlas connection." + }, + "Db": { + "type": "string", + "description": "Name of the database to use for the DLQ." + } + }, + "required": ["Coll", "ConnectionName", "Db"], + "additionalProperties": false + }, + "StreamsOptions": { + "type": "object", + "description": "Optional configuration for the stream processor.", + "properties": { + "Dlq": { + "$ref": "#/definitions/StreamsDLQ" + } + }, + "required": ["Dlq"], + "additionalProperties": false + }, + "Timeouts": { + "type": "object", + "description": "Configurable timeouts for stream processor operations.", + "properties": { + "Create": { + "type": "string", + "description": "Timeout for create operation in Go duration format (e.g., '5m', '10s'). Default is 20 minutes." + } + }, + "additionalProperties": false + } + }, + "properties": { + "Profile": { + "type": "string", + "description": "Profile used to provide credentials information, (a secret with the cfn/atlas/profile/{Profile}, is required), if not provided default is used", + "default": "default" + }, + "ProjectId": { + "type": "string", + "description": "Unique 24-hexadecimal digit string that identifies your project. \n\n**NOTE**: Groups and projects are synonymous terms. Your group id is the same as your project id. For existing groups, your group/project id remains the same. The resource and corresponding endpoints use the term groups.", + "maxLength": 24, + "minLength": 24, + "pattern": "^([a-f0-9]{24})$" + }, + "InstanceName": { + "type": "string", + "description": "Label that identifies the stream processing workspace. This field is deprecated in favor of WorkspaceName. Exactly one of InstanceName or WorkspaceName must be provided." + }, + "WorkspaceName": { + "type": "string", + "description": "Label that identifies the stream processing workspace. This is the preferred field name. Exactly one of InstanceName or WorkspaceName must be provided." + }, + "ProcessorName": { + "type": "string", + "description": "Label that identifies the stream processor." + }, + "Pipeline": { + "type": "string", + "description": "Stream aggregation pipeline you want to apply to your streaming data. This should be a JSON-encoded array of pipeline stages. Refer to MongoDB Atlas Docs for more information on stream aggregation pipelines." 
+ }, + "DesiredState": { + "type": "string", + "description": "The desired state of the stream processor. Used to start or stop the Stream Processor. Valid values are CREATED, STARTED or STOPPED. When a Stream Processor is created without specifying the desired state, it will default to CREATED state. When a Stream Processor is updated without specifying the desired state, it will default to the Previous state.\n\n**NOTE** When a Stream Processor is updated without specifying the desired state, it is stopped and then restored to previous state upon update completion.", + "enum": ["CREATED", "STARTED", "STOPPED"] + }, + "State": { + "type": "string", + "description": "The actual current state of the stream processor as returned by the Atlas API. Commonly occurring states are 'CREATED', 'STARTED', 'STOPPED' and 'FAILED'. This is a read-only property that reflects the real-time state of the processor." + }, + "Options": { + "$ref": "#/definitions/StreamsOptions" + }, + "Id": { + "type": "string", + "description": "Unique 24-hexadecimal character string that identifies the stream processor." + }, + "Stats": { + "type": "string", + "description": "The stats associated with the stream processor as a JSON string. Refer to the MongoDB Atlas Docs for more information." + }, + "Timeouts": { + "$ref": "#/definitions/Timeouts", + "description": "Configurable timeouts for stream processor operations." + }, + "DeleteOnCreateTimeout": { + "type": "boolean", + "description": "Indicates whether to delete the resource being created if a timeout is reached when waiting for completion. When set to `true` and timeout occurs, it triggers the deletion and returns immediately without waiting for deletion to complete. When set to `false`, the timeout will not trigger resource deletion. If you suspect a transient error when the value is `true`, wait before retrying to allow resource deletion to finish. Default is `true`." + } + }, + "additionalProperties": false, + "required": ["ProjectId", "ProcessorName", "Pipeline"], + "readOnlyProperties": [ + "/properties/Id", + "/properties/Stats", + "/properties/State" + ], + "writeOnlyProperties": ["/properties/DeleteOnCreateTimeout"], + "primaryIdentifier": [ + "/properties/ProjectId", + "/properties/InstanceName", + "/properties/WorkspaceName", + "/properties/ProcessorName", + "/properties/Profile" + ], + "createOnlyProperties": [ + "/properties/ProjectId", + "/properties/InstanceName", + "/properties/WorkspaceName", + "/properties/ProcessorName", + "/properties/Profile" + ], + "handlers": { + "create": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "read": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "update": { + "permissions": ["secretsmanager:GetSecretValue"] + }, + "delete": { + "permissions": ["secretsmanager:GetSecretValue"] + } + }, + "documentationUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/cfn-resources/stream-processor/README.md", + "tagging": { + "taggable": false + } +} diff --git a/cfn-resources/stream-processor/resource-role.yaml b/cfn-resources/stream-processor/resource-role.yaml new file mode 100644 index 000000000..bc6022d7d --- /dev/null +++ b/cfn-resources/stream-processor/resource-role.yaml @@ -0,0 +1,38 @@ +AWSTemplateFormatVersion: "2010-09-09" +Description: > + This CloudFormation template creates a role assumed by CloudFormation + during CRUDL operations to mutate resources on behalf of the customer. 
+ +Resources: + ExecutionRole: + Type: AWS::IAM::Role + Properties: + MaxSessionDuration: 8400 + AssumeRolePolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Principal: + Service: resources.cloudformation.amazonaws.com + Action: sts:AssumeRole + Condition: + StringEquals: + aws:SourceAccount: + Ref: AWS::AccountId + StringLike: + aws:SourceArn: + Fn::Sub: arn:${AWS::Partition}:cloudformation:${AWS::Region}:${AWS::AccountId}:type/resource/MongoDB-Atlas-StreamProcessor/* + Path: "/" + Policies: + - PolicyName: ResourceTypePolicy + PolicyDocument: + Version: '2012-10-17' + Statement: + - Effect: Allow + Action: + - "secretsmanager:GetSecretValue" + Resource: "*" +Outputs: + ExecutionRoleArn: + Value: + Fn::GetAtt: ExecutionRole.Arn diff --git a/cfn-resources/stream-processor/template.yml b/cfn-resources/stream-processor/template.yml new file mode 100644 index 000000000..ad114f643 --- /dev/null +++ b/cfn-resources/stream-processor/template.yml @@ -0,0 +1,27 @@ +AWSTemplateFormatVersion: "2010-09-09" +Transform: AWS::Serverless-2016-10-31 +Description: AWS SAM template for the MongoDB::Atlas::StreamProcessor resource type + +Globals: + Function: + Timeout: 180 # docker start-up times can be long for SAM CLI + MemorySize: 256 + +Resources: + TypeFunction: + Type: AWS::Serverless::Function + Properties: + Handler: bootstrap + Runtime: provided.al2 + CodeUri: bin/ + + TestEntrypoint: + Type: AWS::Serverless::Function + Properties: + Handler: bootstrap + Runtime: provided.al2 + CodeUri: bin/ + Environment: + Variables: + MODE: Test + diff --git a/cfn-resources/stream-processor/test/README.md b/cfn-resources/stream-processor/test/README.md new file mode 100644 index 000000000..4e5474e71 --- /dev/null +++ b/cfn-resources/stream-processor/test/README.md @@ -0,0 +1,179 @@ +# MongoDB::Atlas::StreamProcessor + +## Impact + +The following components use this resource and are potentially impacted by any changes. They should also be validated to ensure the changes do not cause a regression. + +- Stream Processor L1 CDK constructor + +## Prerequisites + +### Resources needed to run the manual QA + +All resources are created as part of `cfn-testing-helper.sh`: + +- Atlas Project +- Atlas Stream Instance/Workspace (LONG-RUNNING operation, can take 10-30+ minutes) +- Cluster (for DLQ connection testing - inputs_3) +- Stream Connection (for DLQ connection testing - inputs_3) + +**IMPORTANT**: Stream Instance/Workspace creation is a LONG-RUNNING operation that can take 10-30+ minutes. The `cfn-test-create-inputs.sh` script will create the workspace and wait for it to be ready before proceeding. + +## Manual QA + +Please follow the steps in [TESTING.md](../../../TESTING.md). + +### Success criteria when testing the resource + +#### 1. 
Resource Creation Verification
+
+A Stream Processor should be created in the specified test project for the specified Atlas Stream workspace/instance:
+
+**Atlas UI Verification:**
+
+- Navigate to Atlas UI → Your Project → Stream Processing
+- Select the stream workspace/instance used in the test
+- Go to the **Processors** tab
+- Verify the processor appears with:
+  - **Name**: Matches the `ProcessorName` from the test input
+  - **State**: Matches the `DesiredState` in the template (CREATED, STARTED, or STOPPED)
+  - **Pipeline**: Click on the processor to view details and verify:
+    - Pipeline stages match the `Pipeline` configuration in the template
+    - Source connection name is correct
+    - Merge target connection, database, and collection are correct
+
+**Atlas CLI Verification:**
+
+```bash
+atlas streams processors describe \
+  --instance \
+  --projectId 
+```
+
+- Verify the `id` field is present (matches the CloudFormation `Id` attribute)
+- Verify `name` matches `ProcessorName`
+- Verify `state` matches the `DesiredState` parameter
+- Verify the `pipeline` array matches the `Pipeline` JSON string
+
+#### 2. DLQ Configuration Verification (inputs_3)
+
+For processors with DLQ configuration:
+
+- In Atlas UI: Verify DLQ settings are displayed in the processor details
+- Via Atlas CLI: Verify the `options.dlq` object contains:
+  - `connectionName`: Matches `Options.Dlq.ConnectionName`
+  - `db`: Matches `Options.Dlq.Db`
+  - `coll`: Matches `Options.Dlq.Coll`
+
+#### 3. Backward Compatibility Testing
+
+Test that both field names work correctly:
+
+- **Test with `WorkspaceName`** (preferred field):
+  - Create a processor using the `WorkspaceName` parameter
+  - Verify the processor is created successfully
+  - Verify both `WorkspaceName` and `InstanceName` are set in the returned model (for the primary identifier)
+- **Test with `InstanceName`** (deprecated field):
+  - Create a processor using the `InstanceName` parameter
+  - Verify the processor is created successfully
+  - Verify both `WorkspaceName` and `InstanceName` are set in the returned model
+  - Verify `WorkspaceName` is automatically set from `InstanceName` for forward compatibility
+
+#### 4. State Transition Testing
+
+Test all valid state transitions:
+
+- **Create with `DesiredState: CREATED`**:
+  - Verify the processor is created in the CREATED state
+  - Verify the processor does not start processing automatically
+- **Create with `DesiredState: STARTED`**:
+  - Verify the processor is created and transitions to the STARTED state
+  - Verify this is a long-running operation (it may take several minutes)
+  - Verify callback-based state management handles the transition
+- **Update state from CREATED to STARTED**:
+  - Verify the processor stops (if needed) before the update
+  - Verify the processor starts after the update completes
+  - Verify the state transition is successful
+- **Update state from STARTED to STOPPED**:
+  - Verify the processor stops before the update
+  - Verify the processor remains stopped after the update
+  - Verify the state transition is successful
+
+#### 5. Timeout and Cleanup Behavior
+
+- **Verify `Timeouts.Create` is respected**:
+  - Set a short timeout (e.g., 1 minute) for a processor that takes longer to start
+  - Verify the timeout is triggered after the specified duration
+- **Verify `DeleteOnCreateTimeout` behavior**:
+  - When `DeleteOnCreateTimeout: true` and a timeout occurs:
+    - Verify processor deletion is triggered
+    - Verify the resource is cleaned up from Atlas
+  - When `DeleteOnCreateTimeout: false` and a timeout occurs:
+    - Verify the processor is not deleted
+    - Verify the resource remains in Atlas (it may be in a partial state)
+
+#### 6. 
Primary Identifier Verification
+
+Verify all primary identifier fields are present in returned models:
+
+- `ProjectId`: Always present
+- `WorkspaceName`: Always present (set from `InstanceName` if needed)
+- `InstanceName`: Always present (set from `WorkspaceName` if needed)
+- `ProcessorName`: Always present
+- `Profile`: Always present
+
+This is critical for CloudFormation to properly track the resource.
+
+#### 7. General CFN Resource Success Criteria
+
+Ensure the general [CFN resource success criteria](../../../TESTING.md#success-criteria-when-testing-the-resource) for this resource are met:
+
+- All CRUD operations work correctly
+- Read-after-Create returns correct values
+- Update operations preserve the primary identifier
+- Delete operations clean up resources
+- Error handling is appropriate
+
+## Important Links
+
+- [API Documentation](https://www.mongodb.com/docs/api/doc/atlas-admin-api-v2/group/endpoint-streams)
+- [Resource Usage Documentation](https://www.mongodb.com/docs/atlas/atlas-sp/overview/)
+
+## Unit Testing Locally
+
+The local tests are integrated with the AWS `sam local` and `cfn invoke` tooling:
+
+```bash
+sam local start-lambda --skip-pull-image
+```
+
+then, in another shell:
+
+```bash
+repo_root=$(git rev-parse --show-toplevel)
+cd "${repo_root}/cfn-resources/stream-processor"
+cfn invoke resource CREATE stream-processor-sample-cfn-request.json
+cfn invoke resource DELETE stream-processor-sample-cfn-request.json
+cd -
+```
+
+Both the CREATE and DELETE tests must pass.
+
+## Test Input Files
+
+The test directory contains template files (with the `.template.json` extension) that are used to generate the actual test input files in the `inputs/` directory. The template files are:
+
+- `inputs_1_create.template.json` / `inputs_1_update.template.json`: Basic stream processor with WorkspaceName, CREATED state, using `$merge` (Sample → Cluster)
+- `inputs_2_create.template.json` / `inputs_2_update.template.json`: Stream processor with STARTED state, timeout configuration, and DeleteOnCreateTimeout, using `$merge` (Sample → Cluster)
+- `inputs_3_create.template.json` / `inputs_3_update.template.json`: Stream processor with InstanceName (backward compatibility) and DLQ options, using `$merge` (Cluster → Cluster with the same connection)
+
+When `cfn-test-create-inputs.sh` is run, these template files are processed and the generated files (without "template." in the name) are placed in the `inputs/` directory:
+
+- `inputs/inputs_1_create.json`, `inputs/inputs_1_update.json`, etc.
+
+All input files respect:
+
+- AWS-only behavior (no Azure/GCP-only parameters)
+- Required fields: ProjectId, ProcessorName, Pipeline
+- Backward compatibility: Supports both WorkspaceName and InstanceName
+- Schema validation: All fields match the final CFN schema
diff --git a/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh
new file mode 100755
index 000000000..acdc5bdda
--- /dev/null
+++ b/cfn-resources/stream-processor/test/cfn-test-create-inputs.sh
@@ -0,0 +1,146 @@
+#!/usr/bin/env bash
+# cfn-test-create-inputs.sh
+#
+# This tool generates the JSON input files in the inputs/ directory for `cfn test`.
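+#
+# Usage: ./cfn-test-create-inputs.sh [projectName]
+#   Falls back to $PROJECT_NAME when no argument is given.
+# Requires an authenticated `atlas` CLI and `jq` on the PATH.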
+# + +set -euo pipefail + +rm -rf inputs +mkdir inputs + +#set profile +profile="default" +if [ ${MONGODB_ATLAS_PROFILE+x} ]; then + echo "profile set to ${MONGODB_ATLAS_PROFILE}" + profile=${MONGODB_ATLAS_PROFILE} +fi + +projectName="${1:-$PROJECT_NAME}" +echo "$projectName" +projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | select(.name==$NAME) | .id') +if [ -z "$projectId" ]; then + projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') + + echo -e "Created project \"${projectName}\" with id: ${projectId}\n" +else + echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" +fi +echo -e "=====\nrun this command to clean up\n=====\nmongocli iam projects delete ${projectId} --force\n=====" + +# Create Stream Instance/Workspace (this is a LONG-RUNNING operation, can take 10-30+ minutes) +workspaceName="stream-workspace-$(date +%s)-$RANDOM" +cloudProvider="AWS" + +echo -e "Creating Stream Instance/Workspace \"${workspaceName}\" (this may take 10-30+ minutes)...\n" +atlas streams instances create "${workspaceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} +echo -e "Waiting for Stream Instance/Workspace \"${workspaceName}\" to be ready...\n" +# Poll until the stream instance is ready (watch command doesn't exist for stream instances) +while true; do + hostnames=$(atlas streams instances describe "${workspaceName}" --projectId "${projectId}" --output json 2>/dev/null | jq -r '.hostnames[]? // empty' 2>/dev/null | head -1) + if [ -n "$hostnames" ]; then + echo -e "Stream Instance/Workspace \"${workspaceName}\" is ready\n" + break + fi + sleep 10 +done + +# For inputs_3 (DLQ testing), we need a cluster and stream connection +# Create cluster for DLQ connection (if needed) +clusterName="cluster-$(date +%s)-$RANDOM" +connectionName="stream-connection-$(date +%s)-$RANDOM" + +echo -e "Creating Cluster \"${clusterName}\" for DLQ connection...\n" +atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --provider AWS --region US_EAST_1 --members 3 --tier M10 --diskSizeGB 10 --output=json +atlas clusters watch "${clusterName}" --projectId "${projectId}" +echo -e "Created Cluster \"${clusterName}\"\n" + +echo -e "Creating Stream Connection \"${connectionName}\" for DLQ...\n" +# Create temporary JSON file for connection configuration using jq (consistent with rest of script) +connectionConfig=$(mktemp).json +jq -n \ + --arg type "Cluster" \ + --arg clusterName "${clusterName}" \ + '{ + "type": $type, + "clusterName": $clusterName, + "dbRoleToExecute": { + "role": "atlasAdmin", + "type": "BUILT_IN" + } + }' > "${connectionConfig}" +atlas streams connections create "${connectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${connectionConfig}" \ + --output=json +rm -f "${connectionConfig}" +echo -e "Created Stream Connection \"${connectionName}\"\n" + +# Create Sample connection for inputs_1 and inputs_2 (sample_stream_solar) +sampleConnectionName="sample_stream_solar" +echo -e "Creating Sample Stream Connection \"${sampleConnectionName}\" for inputs_1 and inputs_2...\n" +sampleConnectionConfig=$(mktemp).json +jq -n \ + --arg type "Sample" \ + '{ + "type": $type + }' > "${sampleConnectionConfig}" +# Check if connection already exists +if atlas streams connections describe "${sampleConnectionName}" --projectId "${projectId}" --instance "${workspaceName}" --output json >/dev/null 2>&1; then + echo "Sample connection 
\"${sampleConnectionName}\" already exists, skipping creation" +else + atlas streams connections create "${sampleConnectionName}" \ + --projectId "${projectId}" \ + --instance "${workspaceName}" \ + --file "${sampleConnectionConfig}" \ + --output=json + echo -e "Created Sample Stream Connection \"${sampleConnectionName}\"\n" +fi +rm -f "${sampleConnectionConfig}" + +# Reuse the Cluster connection from inputs_3 for inputs_1 and inputs_2 sink (saves time/resources) + +# Generate input files +# Reuse connectionName from inputs_3 for inputs_1 and inputs_2 sink (saves creating another cluster) +# Also set InstanceName from WorkspaceName for primary identifier (both fields required) +WORDTOREMOVE="template." + +cd "$(dirname "$0")" || exit + +for inputFile in inputs_*.template.json; do + outputFile=${inputFile//$WORDTOREMOVE/} + + # Determine which placeholders to use based on file name + if [[ "$inputFile" == *"inputs_3"* ]]; then + # inputs_3 uses CONNECTION_NAME_PLACEHOLDER for both source and sink + jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Options.Dlq.ConnectionName?|=$connection_name + | .Pipeline?|=gsub("CONNECTION_NAME_PLACEHOLDER"; $connection_name)' \ + "$inputFile" >"../inputs/$outputFile" + else + # inputs_1 and inputs_2 use SINK_CONNECTION_PLACEHOLDER + jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg sink_connection_name "$connectionName" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .InstanceName?|=$workspace_name + | .Pipeline?|=gsub("SINK_CONNECTION_PLACEHOLDER"; $sink_connection_name)' \ + "$inputFile" >"../inputs/$outputFile" + fi +done + +cd .. +ls -l inputs + +echo -e "Test input files generated successfully in inputs/ directory\n" diff --git a/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh new file mode 100755 index 000000000..50efa3ecf --- /dev/null +++ b/cfn-resources/stream-processor/test/cfn-test-delete-inputs.sh @@ -0,0 +1,96 @@ +#!/usr/bin/env bash +# cfn-test-delete-inputs.sh +# +# This tool deletes the mongodb resources used for `cfn test` as inputs. 
+
+set -euo pipefail
+
+function usage {
+	echo "usage:$0 "
+}
+
+projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json)
+workspaceName=$(jq -r '.WorkspaceName // .InstanceName' ./inputs/inputs_1_create.json)
+processorName1=$(jq -r '.ProcessorName' ./inputs/inputs_1_create.json)
+processorName2=$(jq -r '.ProcessorName' ./inputs/inputs_2_create.json)
+processorName3=$(jq -r '.ProcessorName' ./inputs/inputs_3_create.json)
+
+# Delete stream processors (if they exist)
+for processorName in "$processorName1" "$processorName2" "$processorName3"; do
+	if [ -z "$processorName" ] || [ "$processorName" == "null" ]; then
+		continue
+	fi
+	if atlas streams processors delete "${processorName}" \
+		--projectId "${projectId}" \
+		--instance "${workspaceName}" \
+		--force 2>/dev/null; then
+		echo "deleted stream processor with name ${processorName}"
+	else
+		echo "failed to delete stream processor '${processorName}' (it may not exist)"
+	fi
+done
+
+# Delete the Sample connection (sample_stream_solar) if it exists
+sampleConnectionName="sample_stream_solar"
+if atlas streams connections delete "${sampleConnectionName}" \
+	--projectId "${projectId}" \
+	--instance "${workspaceName}" \
+	--force 2>/dev/null; then
+	echo "deleted sample stream connection with name ${sampleConnectionName}"
+else
+	echo "failed to delete sample stream connection '${sampleConnectionName}' (it may not exist)"
+fi
+
+# Get the DLQ connection name from inputs_3 if it exists
+if [ -f "./inputs/inputs_3_create.json" ]; then
+	connectionName=$(jq -r '.Options.Dlq.ConnectionName // empty' ./inputs/inputs_3_create.json)
+	if [ -n "$connectionName" ]; then
+		if atlas streams connections delete "${connectionName}" \
+			--projectId "${projectId}" \
+			--instance "${workspaceName}" \
+			--force 2>/dev/null; then
+			echo "deleted stream connection with name ${connectionName}"
+		else
+			echo "failed to delete stream connection '${connectionName}' (it may not exist)"
+		fi
+	fi
+fi
+
+# Delete all clusters in the project (created for DLQ testing).
+# The cluster name is not stored in the input JSON, so we list and delete all clusters.
+# Clusters must be deleted before the stream instance and project to avoid dependency conflicts.
+echo "Checking for clusters to delete in project ${projectId}..."
+
+clusterList=$(atlas clusters list --projectId "${projectId}" --output json 2>/dev/null | jq -r '.results[]?.name // empty' 2>/dev/null || echo "")
+if [ -n "$clusterList" ]; then
+	while IFS= read -r clusterName; do
+		if [ -n "$clusterName" ] && [ "$clusterName" != "null" ]; then
+			if atlas clusters delete "${clusterName}" --projectId "${projectId}" --force 2>/dev/null; then
+				echo "deleting cluster with name ${clusterName}"
+				# Wait for cluster deletion to complete
+				atlas clusters watch "${clusterName}" --projectId "${projectId}" 2>/dev/null || true
+			else
+				echo "failed to delete cluster '${clusterName}' (it may not exist)"
+			fi
+		fi
+	done <<< "$clusterList"
+else
+	echo "No clusters found in project"
+fi
+
+# Delete the stream instance/workspace (after clusters are deleted).
+# NOTE: the watch command does not exist for stream instances, so we do not wait here.
+if atlas streams instances delete "${workspaceName}" --projectId "${projectId}" --force 2>/dev/null; then
+	echo "deleting stream instance/workspace with name ${workspaceName}"
+else
+	echo "failed to delete stream instance/workspace '${workspaceName}' (it may not exist)"
+fi
+
+# Delete the project
+if atlas projects delete "$projectId" --force 2>/dev/null; then
+	echo "$projectId project deletion OK"
+else
+	(echo "Failed cleaning project: $projectId" && exit 1)
+fi
diff --git a/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh b/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh
new file mode 100755
index 000000000..4b795316e
--- /dev/null
+++ b/cfn-resources/stream-processor/test/contract-testing/cfn-test-create.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# This tool generates the resources and JSON input files in inputs/ for `cfn test`.
+set -o errexit
+set -o nounset
+set -o pipefail
+
+projectName="cfn-test-bot-$(date +%s)-$RANDOM"
+
+# create project
+projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id')
+
+echo "projectId: $projectId"
+echo "projectName: $projectName"
+
+./test/cfn-test-create-inputs.sh "$projectName"
diff --git a/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh
new file mode 100755
index 000000000..71286ddfb
--- /dev/null
+++ b/cfn-resources/stream-processor/test/contract-testing/cfn-test-delete.sh
@@ -0,0 +1,15 @@
+#!/usr/bin/env bash
+
+# This tool deletes the MongoDB resources used as inputs for `cfn test`.
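+# Assumes ./inputs/inputs_1_create.json exists (generated by cfn-test-create.sh).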
+set -o errexit +set -o nounset +set -o pipefail + +projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) + +# delete project +if atlas projects delete "$projectId" --force; then + echo "$projectId project deletion OK" +else + (echo "Failed cleaning project: $projectId" && exit 1) +fi diff --git a/cfn-resources/stream-processor/test/inputs_1_create.template.json b/cfn-resources/stream-processor/test/inputs_1_create.template.json new file mode 100644 index 000000000..7e0a720ec --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_1_create.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-1", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "DesiredState": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_1_update.template.json b/cfn-resources/stream-processor/test/inputs_1_update.template.json new file mode 100644 index 000000000..7e0a720ec --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_1_update.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-1", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "DesiredState": "CREATED" +} diff --git a/cfn-resources/stream-processor/test/inputs_2_create.template.json b/cfn-resources/stream-processor/test/inputs_2_create.template.json new file mode 100644 index 000000000..3d2ac71a5 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_2_create.template.json @@ -0,0 +1,12 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-2", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "DesiredState": "STARTED", + "Timeouts": { + "Create": "25m" + }, + "DeleteOnCreateTimeout": true +} diff --git a/cfn-resources/stream-processor/test/inputs_2_update.template.json b/cfn-resources/stream-processor/test/inputs_2_update.template.json new file mode 100644 index 000000000..2653d8846 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_2_update.template.json @@ -0,0 +1,8 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "test-processor-2", + "Pipeline": "[{\"$source\": {\"connectionName\": \"sample_stream_solar\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"SINK_CONNECTION_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "DesiredState": "STOPPED" +} diff --git a/cfn-resources/stream-processor/test/inputs_3_create.template.json b/cfn-resources/stream-processor/test/inputs_3_create.template.json new file mode 100644 index 000000000..b9b2acbcf --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_3_create.template.json @@ -0,0 +1,16 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-3", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + 
"DesiredState": "CREATED", + "Options": { + "Dlq": { + "Coll": "dlq-collection", + "ConnectionName": "", + "Db": "dlq-database" + } + } +} diff --git a/cfn-resources/stream-processor/test/inputs_3_update.template.json b/cfn-resources/stream-processor/test/inputs_3_update.template.json new file mode 100644 index 000000000..4eb1d4397 --- /dev/null +++ b/cfn-resources/stream-processor/test/inputs_3_update.template.json @@ -0,0 +1,16 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "InstanceName": "", + "ProcessorName": "test-processor-3", + "Pipeline": "[{\"$source\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"CONNECTION_NAME_PLACEHOLDER\", \"db\": \"test\", \"coll\": \"output\"}}}]", + "DesiredState": "CREATED", + "Options": { + "Dlq": { + "Coll": "dlq-collection-updated", + "ConnectionName": "", + "Db": "dlq-database-updated" + } + } +} diff --git a/cfn-resources/stream-processor/test/stream-processor-sample-cfn-request.json b/cfn-resources/stream-processor/test/stream-processor-sample-cfn-request.json new file mode 100644 index 000000000..434caeccc --- /dev/null +++ b/cfn-resources/stream-processor/test/stream-processor-sample-cfn-request.json @@ -0,0 +1,11 @@ +{ + "desiredResourceState": { + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ProcessorName": "sample-processor", + "Pipeline": "[{\"$match\": {\"status\": \"active\"}}]", + "DesiredState": "CREATED" + }, + "previousResourceState": {} +} diff --git a/cfn-resources/util/constants/constants.go b/cfn-resources/util/constants/constants.go index 4cd57df70..d9de1bd9d 100644 --- a/cfn-resources/util/constants/constants.go +++ b/cfn-resources/util/constants/constants.go @@ -157,4 +157,7 @@ const ( ConnectionName = "ConnectionName" Type = "Type" StreamConfig = "StreamConfig" + + ProcessorName = "ProcessorName" + Pipeline = "Pipeline" ) diff --git a/examples/atlas-streams/README.md b/examples/atlas-streams/README.md index 53ae05dad..54d0ac048 100644 --- a/examples/atlas-streams/README.md +++ b/examples/atlas-streams/README.md @@ -4,12 +4,9 @@ Atlas Stream Processing is composed of multiple components, and users can levera ### Resources supported by AWS CloudFormation -- `MongoDB::Atlas::StreamInstance`: Enables creating, modifying, and deleting Stream Instances. as part of this resource, a computed `hostnames` attribute is available for connecting to the created instance. +- `MongoDB::Atlas::StreamInstance`: Enables creating, modifying, and deleting Stream Instances. As part of this resource, a computed `hostnames` attribute is available for connecting to the created instance. - `MongoDB::Atlas::StreamConnection`: Enables creating, modifying, and deleting Stream Instance Connections, which serve as data sources and sinks for your instance. - -### Managing Stream Processors - -Once a stream instance and its connections have been defined, `Stream Processors` can be created to define how your data will be processed in your instance. There are currently no resources defined in CloudFormation to provide this configuration. To obtain information on how this can be configured refer to [Manage Stream Processors](https://www.mongodb.com/docs/atlas/atlas-sp/manage-stream-processor/#manage-stream-processors). +- `MongoDB::Atlas::StreamProcessor`: Enables creating, modifying, and deleting Stream Processors, which define how data is processed in your stream instance using aggregation pipelines. 
 Connect to your stream instance defined in CloudFormation using the `hostnames` output attribute.
-This value can then be used to connect to the stream instance using `mongosh`, as described in the [Get Started Tutorial](https://www.mongodb.com/docs/atlas/atlas-sp/tutorial/).
+This value can then be used to connect to the stream instance using `mongosh`, as described in the [Get Started Tutorial](https://www.mongodb.com/docs/atlas/atlas-sp/tutorial/).
diff --git a/examples/atlas-streams/stream-processor/README.md b/examples/atlas-streams/stream-processor/README.md
new file mode 100644
index 000000000..627057fa5
--- /dev/null
+++ b/examples/atlas-streams/stream-processor/README.md
@@ -0,0 +1,135 @@
+# How to create a MongoDB::Atlas::StreamProcessor
+
+## Step 1: Activate the stream processor resource in CloudFormation
+
+Step a: Create the execution role using [execution-role.yaml](../../execution-role.yaml) in the examples folder.
+
+Step b: Search for the MongoDB::Atlas::StreamProcessor resource.
+
+    (CloudFormation > Public extensions > choose 'Third party' > search with "Execution name prefix = MongoDB")
+
+Step c: Select and activate the extension, then enter the RoleArn of the role created in Step a.
+
+Your StreamProcessor resource is ready to use.
+
+## Step 2: Choose a template based on your use case
+
+### Example 1: Basic Stream Processor ([stream-processor.json](stream-processor.json))
+
+Creates a stream processor that reads from a source connection and merges data into a cluster connection. This example uses `$merge` to write data to a regular MongoDB collection.
+
+**Use cases:**
+
+- Sample data to cluster (e.g., using `sample_stream_solar`)
+- Cluster to cluster data streaming
+- Simple data replication
+
+**Parameters:**
+
+1. **ProjectId** - Atlas Project Id (24 hexadecimal characters)
+2. **WorkspaceName** - Name of your stream instance/workspace
+3. **ProcessorName** - Unique name for the stream processor
+4. **SourceConnectionName** - Name of the source connection:
+   - For sample data: `sample_stream_solar`
+   - For a cluster source: your cluster connection name
+5. **SinkConnectionName** - Name of the sink cluster connection (must be a cluster connection)
+6. **SinkDatabase** - Target database name (optional, default: `test`)
+7. **SinkCollection** - Target collection name (optional, default: `output`)
+8. **DesiredState** - Desired state of the processor: `CREATED`, `STOPPED`, or `STARTED` (optional, default: `CREATED`)
+9. **Profile** - Secret Manager Profile for Atlas credentials (optional, default: `default`)
+
+**Pipeline stages:**
+
+- `$source` - Reads from the source connection
+- `$merge` - Merges data into the target cluster connection (for regular collections)
+
+### Example 2: Stream Processor with Dead Letter Queue ([stream-processor-dlq.json](stream-processor-dlq.json))
+
+Creates a stream processor with Dead Letter Queue (DLQ) configuration. Failed messages are automatically sent to a DLQ collection for error handling and debugging.
+
+**Additional Parameters (beyond Example 1):**
+
+10. **DlqConnectionName** - Name of the DLQ connection (must be a cluster connection)
+11. **DlqDatabase** - DLQ database name (optional, default: `dlq`)
+12. **DlqCollection** - DLQ collection name (optional, default: `dlq-messages`)
+
+**Pipeline stages:**
+
+- `$source` - Reads from the source connection
+- `$merge` - Merges data into the target cluster connection (for regular collections)
+- **Options.Dlq** - Configured to capture failed messages
+
+### Example 3: Sample to Cluster with Time-Series Collection ([stream-processor-sample-emit.json](stream-processor-sample-emit.json))
+
+Creates a stream processor that reads from a sample connection and emits data to a time-series collection in a cluster connection using `$emit`.
+
+**Use cases:**
+
+- Sample data to time-series collection
+- Real-time time-series data ingestion
+- IoT sensor data streaming
+
+**Parameters:**
+
+1. **ProjectId** - Atlas Project Id (24 hexadecimal characters)
+2. **WorkspaceName** - Name of your stream instance/workspace
+3. **ProcessorName** - Unique name for the stream processor
+4. **SourceConnectionName** - Name of the source connection (default: `sample_stream_solar`)
+5. **SinkConnectionName** - Name of the sink cluster connection (must be a cluster connection)
+6. **SinkDatabase** - Target database name (optional, default: `sample`)
+7. **SinkCollection** - Target time-series collection name (optional, default: `solar`)
+8. **TimeField** - Field name containing the timestamp (optional, default: `_ts`)
+9. **DesiredState** - Desired state of the processor: `CREATED`, `STOPPED`, or `STARTED` (optional, default: `STARTED`)
+10. **Profile** - Secret Manager Profile for Atlas credentials (optional, default: `default`)
+
+**Pipeline stages:**
+
+- `$source` - Reads from the sample connection
+- `$emit` - Emits data to a time-series collection with a `timeseries` configuration
+
+**Note:** The target collection must be a time-series collection. Ensure the collection exists with the correct time-series configuration before starting the processor.
+
+## Pipeline Stage Options
+
+### $source
+
+Reads data from a source connection. Supported sources:
+
+- **Sample connections**: `sample_stream_solar` (for testing)
+- **Cluster connections**: Read from MongoDB collections
+
+### $emit
+
+Writes data to a target connection. Options:
+
+- **Cluster**: Write to MongoDB collections
+  - `connectionName` - Target cluster connection name
+  - `db` - Target database
+  - `coll` - Target collection
+  - `timeseries` (optional) - For time-series collections
+    - `timeField` - Field name containing the timestamp
+
+### $merge
+
+Merges data into regular MongoDB collections. Use `$merge` for standard (non-timeseries) collections.
+
+- **Cluster**: Merge into MongoDB collections
+  - `into` - Object containing the target connection, database, and collection:
+    - `connectionName` - Target cluster connection name
+    - `db` - Target database
+    - `coll` - Target collection
+
+**Note:** Use `$merge` for regular collections. Use `$emit` only for time-series collections (it requires the `timeseries` option).
+
+## State Management
+
+The `DesiredState` parameter controls the processor lifecycle:
+
+- **CREATED** - Processor is created but not running (default)
+- **STARTED** - Processor is actively processing data
+- **STOPPED** - Processor is stopped (can be restarted)
+
+The `State` output (read-only) reflects the actual current state of the processor as returned by the Atlas API. Common states include `CREATED`, `STARTED`, `STOPPED`, and `FAILED`.
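+
+After deployment, one way to confirm the actual state is to read the stack output. A minimal sketch, assuming a hypothetical stack name of `stream-processor-stack`:
+
+```bash
+aws cloudformation describe-stacks \
+  --stack-name stream-processor-stack \
+  --query "Stacks[0].Outputs[?OutputKey=='ProcessorState'].OutputValue" \
+  --output text
+```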
+ +**Note:** When updating a processor, if the current state is `STARTED`, the processor will be stopped, updated, and then restarted if the `DesiredState` is `STARTED`. diff --git a/examples/atlas-streams/stream-processor/stream-processor-dlq.json b/examples/atlas-streams/stream-processor/stream-processor-dlq.json new file mode 100644 index 000000000..eff1e9158 --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor-dlq.json @@ -0,0 +1,129 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor with Dead Letter Queue (DLQ) configuration. The processor uses a source connection and merges data into a cluster connection, with failed messages sent to a DLQ collection.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Description": "Name of the source stream connection" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "test", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "output", + "Description": "Name of the collection for the sink connection" + }, + "DlqConnectionName": { + "Type": "String", + "Description": "Name of the DLQ connection (must be a cluster connection)" + }, + "DlqDatabase": { + "Type": "String", + "Default": "dlq", + "Description": "Name of the database for the DLQ" + }, + "DlqCollection": { + "Type": "String", + "Default": "dlq-messages", + "Description": "Name of the collection for the DLQ" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor", + "AllowedValues": ["CREATED", "STARTED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\"}}}]", + { + "SourceConnection": { + "Ref": "SourceConnectionName" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + }, + "Options": { + "Dlq": { + "ConnectionName": { + "Ref": "DlqConnectionName" + }, + "Db": { + "Ref": "DlqDatabase" + }, + "Coll": { + "Ref": "DlqCollection" + } + } + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from 
Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} diff --git a/examples/atlas-streams/stream-processor/stream-processor-sample-emit.json b/examples/atlas-streams/stream-processor/stream-processor-sample-emit.json new file mode 100644 index 000000000..116db9015 --- /dev/null +++ b/examples/atlas-streams/stream-processor/stream-processor-sample-emit.json @@ -0,0 +1,111 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor that reads from a sample connection and emits data to a time-series collection in a cluster connection using $emit.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Default": "sample_stream_solar", + "Description": "Name of the source stream connection (e.g., sample_stream_solar for sample data)" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "sample", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "solar", + "Description": "Name of the time-series collection for the sink connection" + }, + "TimeField": { + "Type": "String", + "Default": "_ts", + "Description": "Field name containing the timestamp for the time-series collection" + }, + "DesiredState": { + "Type": "String", + "Default": "STARTED", + "Description": "Desired state of the stream processor", + "AllowedValues": ["CREATED", "STARTED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$emit\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\", \"timeseries\": {\"timeField\": \"${TimeField}\"}}}]", + { + "SourceConnection": { + "Ref": "SourceConnectionName" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + }, + "TimeField": { + "Ref": "TimeField" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +} diff --git a/examples/atlas-streams/stream-processor/stream-processor.json b/examples/atlas-streams/stream-processor/stream-processor.json new file mode 100644 index 000000000..2093b7ebf --- /dev/null +++ 
b/examples/atlas-streams/stream-processor/stream-processor.json @@ -0,0 +1,102 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates a stream processor for a given stream workspace in the specified project. The processor reads from a source connection (sample data, cluster, or Kafka) and merges data into a cluster connection collection.", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id (24 hexadecimal characters)" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processing workspace" + }, + "ProcessorName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream processor" + }, + "SourceConnectionName": { + "Type": "String", + "Description": "Name of the source stream connection (e.g., sample_stream_solar for sample data, or a cluster/kafka connection name)" + }, + "SinkConnectionName": { + "Type": "String", + "Description": "Name of the sink stream connection (must be a cluster connection)" + }, + "SinkDatabase": { + "Type": "String", + "Default": "test", + "Description": "Name of the database for the sink connection" + }, + "SinkCollection": { + "Type": "String", + "Default": "output", + "Description": "Name of the collection for the sink connection" + }, + "DesiredState": { + "Type": "String", + "Default": "CREATED", + "Description": "Desired state of the stream processor", + "AllowedValues": ["CREATED", "STARTED", "STOPPED"] + } + }, + "Resources": { + "StreamProcessor": { + "Type": "MongoDB::Atlas::StreamProcessor", + "Properties": { + "Profile": { + "Ref": "Profile" + }, + "ProjectId": { + "Ref": "ProjectId" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "ProcessorName": { + "Ref": "ProcessorName" + }, + "Pipeline": { + "Fn::Sub": [ + "[{\"$source\": {\"connectionName\": \"${SourceConnection}\"}}, {\"$merge\": {\"into\": {\"connectionName\": \"${SinkConnection}\", \"db\": \"${SinkDb}\", \"coll\": \"${SinkColl}\"}}}]", + { + "SourceConnection": { + "Ref": "SourceConnectionName" + }, + "SinkConnection": { + "Ref": "SinkConnectionName" + }, + "SinkDb": { + "Ref": "SinkDatabase" + }, + "SinkColl": { + "Ref": "SinkCollection" + } + } + ] + }, + "DesiredState": { + "Ref": "DesiredState" + } + } + } + }, + "Outputs": { + "ProcessorId": { + "Description": "Unique identifier of the stream processor", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "Id"] + } + }, + "ProcessorState": { + "Description": "Current state of the stream processor from Atlas API", + "Value": { + "Fn::GetAtt": ["StreamProcessor", "State"] + } + } + } +}
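
A note on the `Timeouts.Create` property defined in the schema above: it takes a Go duration string. Below is a minimal, illustrative sketch of parsing such a value with the documented 20-minute default; this is not the resource handler's actual code, and `parseCreateTimeout` is a hypothetical helper.

```go
package main

import (
	"fmt"
	"time"
)

// parseCreateTimeout parses a Go duration string such as "5m" or "10s" and
// falls back to the documented 20-minute default when the value is unset.
func parseCreateTimeout(s string) (time.Duration, error) {
	if s == "" {
		return 20 * time.Minute, nil
	}
	return time.ParseDuration(s)
}

func main() {
	// "25m" mirrors the value used in inputs_2_create.template.json.
	d, err := parseCreateTimeout("25m")
	if err != nil {
		fmt.Println("invalid Timeouts.Create value:", err)
		return
	}
	fmt.Println("create timeout:", d) // prints "create timeout: 25m0s"
}
```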