From d491792e74a96216f6ccce0c4b323474272d87de Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Thu, 8 Jan 2026 11:11:55 +0530 Subject: [PATCH 01/10] feat: Update Stream Connection Resource --- cfn-resources/stream-connection/README.md | 50 ++- .../cmd/resource/mappings.go | 119 ++++-- .../cmd/resource/mappings_test.go | 367 +++++----------- .../stream-connection/cmd/resource/model.go | 34 +- .../cmd/resource/resource.go | 111 +++-- .../cmd/resource/resource_test.go | 392 ++++++++++++++++++ .../stream-connection/docs/README.md | 92 +++- cfn-resources/stream-connection/docs/aws.md | 34 ++ .../stream-connection/docs/headers.md | 32 ++ .../stream-connection/docs/networking.md | 32 ++ .../docs/streamskafkaauthentication.md | 76 +++- .../mongodb-atlas-streamconnection.json | 122 +++++- .../test/cfn-test-create-inputs.sh | 96 ++++- .../test/cfn-test-delete-inputs.sh | 23 +- .../test/inputs_1_create.json | 2 +- .../test/inputs_1_update.json | 2 +- .../test/inputs_2_create.json | 4 +- .../test/inputs_2_update.json | 4 +- .../test/inputs_3_create.json | 2 +- .../test/inputs_3_update.json | 2 +- .../test/inputs_4_create.json | 11 + .../test/inputs_4_update.json | 11 + .../test/inputs_5_create.json | 13 + .../test/inputs_5_update.json | 14 + .../test/inputs_6_create.json | 30 ++ .../test/inputs_6_update.json | 30 ++ .../atlas-streams/stream-connection/README.md | 258 ++++++++++++ .../aws-lambda-stream-connection.json | 77 ++++ .../cluster-stream-connection.json | 8 +- .../https-stream-connection.json | 79 ++++ .../kafka-oauth-stream-connection.json | 164 ++++++++ .../kafka-stream-connection.json | 8 +- .../sample-stream-connection.json | 8 +- 33 files changed, 1920 insertions(+), 387 deletions(-) create mode 100644 cfn-resources/stream-connection/cmd/resource/resource_test.go create mode 100644 cfn-resources/stream-connection/docs/aws.md create mode 100644 cfn-resources/stream-connection/docs/headers.md create mode 100644 cfn-resources/stream-connection/docs/networking.md 
create mode 100644 cfn-resources/stream-connection/test/inputs_4_create.json create mode 100644 cfn-resources/stream-connection/test/inputs_4_update.json create mode 100644 cfn-resources/stream-connection/test/inputs_5_create.json create mode 100644 cfn-resources/stream-connection/test/inputs_5_update.json create mode 100644 cfn-resources/stream-connection/test/inputs_6_create.json create mode 100644 cfn-resources/stream-connection/test/inputs_6_update.json create mode 100644 examples/atlas-streams/stream-connection/README.md create mode 100644 examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json create mode 100644 examples/atlas-streams/stream-connection/https-stream-connection.json create mode 100644 examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json diff --git a/cfn-resources/stream-connection/README.md b/cfn-resources/stream-connection/README.md index 18f9db2c3..407b65df8 100644 --- a/cfn-resources/stream-connection/README.md +++ b/cfn-resources/stream-connection/README.md @@ -11,6 +11,52 @@ For instructions on setting up a profile, [see here](/README.md#mongodb-atlas-ap See the [resource docs](docs/README.md). Also refer [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials. -## Cloudformation Examples +## CloudFormation Examples -See the example [CFN Template](/examples/stream-connection/kafka-stream-connection.json) for example resource. 
+Example templates are available in the [examples directory](/examples/atlas-streams/stream-connection/): + +- **Cluster Connection**: [cluster-stream-connection.json](/examples/atlas-streams/stream-connection/cluster-stream-connection.json) - Connects a Stream Workspace to an Atlas cluster +- **Kafka Connection**: [kafka-stream-connection.json](/examples/atlas-streams/stream-connection/kafka-stream-connection.json) - Connects a Stream Workspace to a Kafka cluster +- **Sample Connection**: [sample-stream-connection.json](/examples/atlas-streams/stream-connection/sample-stream-connection.json) - Uses a sample dataset + +For detailed deployment and verification instructions, see the [examples README](/examples/atlas-streams/stream-connection/README.md). + +## Deployment + +### Prerequisites +1. An existing Atlas project +2. An existing Stream Workspace (create using `atlas streams instances create`) +3. For Cluster connections: An existing Atlas cluster +4. AWS credentials configured with CloudFormation permissions +5. Atlas API keys stored in AWS Secrets Manager + +### Deploy Example + +```bash +aws cloudformation deploy \ + --template-file examples/atlas-streams/stream-connection/cluster-stream-connection.json \ + --stack-name my-stream-connection \ + --parameter-overrides \ + ProjectId=YOUR_PROJECT_ID \ + WorkspaceName=YOUR_WORKSPACE_NAME \ + ConnectionName=my-connection \ + ClusterName=YOUR_CLUSTER_NAME \ + DbRole=atlasAdmin \ + DbRoleType=BUILT_IN \ + --capabilities CAPABILITY_IAM \ + --region us-east-1 +``` + +## Verification + +After deployment, verify the connection using Atlas CLI: + +```bash +# List all connections for the workspace +atlas streams connections list --projectId + +# Get specific connection details +atlas streams connections get --projectId +``` + +The connection should appear in the list with the correct type and configuration matching your CloudFormation template parameters. 
diff --git a/cfn-resources/stream-connection/cmd/resource/mappings.go b/cfn-resources/stream-connection/cmd/resource/mappings.go index b0e379b24..ab34c085a 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings.go @@ -15,26 +15,37 @@ package resource import ( - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312010/admin" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" ) -func GetStreamConnectionModel(streamsConn *admin20231115014.StreamsConnection, currentModel *Model) *Model { - model := new(Model) +func GetStreamConnectionModel(streamsConn *admin.StreamsConnection, currentModel *Model) *Model { + var model *Model if currentModel != nil { model = currentModel + if model.WorkspaceName == nil && model.InstanceName != nil && *model.InstanceName != "" { + model.WorkspaceName = model.InstanceName + } + } else { + model = new(Model) } model.ConnectionName = streamsConn.Name model.Type = streamsConn.Type model.ClusterName = streamsConn.ClusterName + if streamsConn.ClusterGroupId != nil { + model.ClusterProjectId = streamsConn.ClusterGroupId + } model.BootstrapServers = streamsConn.BootstrapServers + if streamsConn.Url != nil { + model.Url = streamsConn.Url + } model.DbRoleToExecute = NewModelDBRoleToExecute(streamsConn.DbRoleToExecute) - model.Authentication = NewModelAuthentication(streamsConn.Authentication) + model.Authentication = NewModelAuthentication(streamsConn.Authentication, currentModel) model.Security = NewModelSecurity(streamsConn.Security) @@ -42,10 +53,29 @@ func GetStreamConnectionModel(streamsConn *admin20231115014.StreamsConnection, c model.Config = *streamsConn.Config } + if streamsConn.Headers != nil { + model.Headers = *streamsConn.Headers + } + + if streamsConn.Networking != nil && streamsConn.Networking.Access != nil { + model.Networking = &Networking{ + Access: &Access{ + Type: 
streamsConn.Networking.Access.Type, + ConnectionId: streamsConn.Networking.Access.ConnectionId, + }, + } + } + + if streamsConn.Aws != nil { + model.Aws = &Aws{ + RoleArn: streamsConn.Aws.RoleArn, + } + } + return model } -func NewModelDBRoleToExecute(dbRole *admin20231115014.DBRoleToExecute) *DBRoleToExecute { +func NewModelDBRoleToExecute(dbRole *admin.DBRoleToExecute) *DBRoleToExecute { if dbRole == nil { return nil } @@ -56,19 +86,25 @@ func NewModelDBRoleToExecute(dbRole *admin20231115014.DBRoleToExecute) *DBRoleTo } } -func NewModelAuthentication(authentication *admin20231115014.StreamsKafkaAuthentication) *StreamsKafkaAuthentication { +func NewModelAuthentication(authentication *admin.StreamsKafkaAuthentication, currentModel *Model) *StreamsKafkaAuthentication { if authentication == nil { return nil } - return &StreamsKafkaAuthentication{ - Mechanism: authentication.Mechanism, - Password: authentication.Password, - Username: authentication.Username, + authModel := &StreamsKafkaAuthentication{ + Mechanism: authentication.Mechanism, + Method: authentication.Method, + Username: authentication.Username, + TokenEndpointUrl: authentication.TokenEndpointUrl, + ClientId: authentication.ClientId, + Scope: authentication.Scope, + SaslOauthbearerExtensions: authentication.SaslOauthbearerExtensions, } + + return authModel } -func NewModelSecurity(security *admin20231115014.StreamsKafkaSecurity) *StreamsKafkaSecurity { +func NewModelSecurity(security *admin.StreamsKafkaSecurity) *StreamsKafkaSecurity { if security == nil { return nil } @@ -79,18 +115,23 @@ func NewModelSecurity(security *admin20231115014.StreamsKafkaSecurity) *StreamsK } } -func newStreamConnectionReq(model *Model) *admin20231115014.StreamsConnection { - streamConnReq := admin20231115014.StreamsConnection{ +func newStreamConnectionReq(model *Model) *admin.StreamsConnection { + streamConnReq := admin.StreamsConnection{ Name: model.ConnectionName, Type: model.Type, } - if 
util.SafeString(streamConnReq.Type) == ClusterConnectionType { + typeStr := util.SafeString(streamConnReq.Type) + + if typeStr == ClusterConnectionType { streamConnReq.ClusterName = model.ClusterName + if model.ClusterProjectId != nil { + streamConnReq.ClusterGroupId = model.ClusterProjectId + } streamConnReq.DbRoleToExecute = NewDBRoleToExecute(model.DbRoleToExecute) } - if util.SafeString(streamConnReq.Type) == KafkaConnectionType { + if typeStr == KafkaConnectionType { streamConnReq.BootstrapServers = model.BootstrapServers streamConnReq.Security = newStreamsKafkaSecurity(model.Security) streamConnReq.Authentication = newStreamsKafkaAuthentication(model.Authentication) @@ -98,41 +139,71 @@ func newStreamConnectionReq(model *Model) *admin20231115014.StreamsConnection { if model.Config != nil { streamConnReq.Config = &model.Config } + + if model.Networking != nil && model.Networking.Access != nil { + streamConnReq.Networking = &admin.StreamsKafkaNetworking{ + Access: &admin.StreamsKafkaNetworkingAccess{ + Type: model.Networking.Access.Type, + ConnectionId: model.Networking.Access.ConnectionId, + }, + } + } + } + + if typeStr == AWSLambdaType { + if model.Aws != nil { + streamConnReq.Aws = &admin.StreamsAWSConnectionConfig{ + RoleArn: model.Aws.RoleArn, + } + } + } + + if typeStr == HTTPSType { + streamConnReq.Url = model.Url + if model.Headers != nil { + streamConnReq.Headers = &model.Headers + } } return &streamConnReq } -func NewDBRoleToExecute(dbRoleToExecuteModel *DBRoleToExecute) *admin20231115014.DBRoleToExecute { +func NewDBRoleToExecute(dbRoleToExecuteModel *DBRoleToExecute) *admin.DBRoleToExecute { if dbRoleToExecuteModel == nil { return nil } - return &admin20231115014.DBRoleToExecute{ + return &admin.DBRoleToExecute{ Role: dbRoleToExecuteModel.Role, Type: dbRoleToExecuteModel.Type, } } -func newStreamsKafkaSecurity(securityModel *StreamsKafkaSecurity) *admin20231115014.StreamsKafkaSecurity { +func newStreamsKafkaSecurity(securityModel 
*StreamsKafkaSecurity) *admin.StreamsKafkaSecurity { if securityModel == nil { return nil } - return &admin20231115014.StreamsKafkaSecurity{ + return &admin.StreamsKafkaSecurity{ BrokerPublicCertificate: securityModel.BrokerPublicCertificate, Protocol: securityModel.Protocol, } } -func newStreamsKafkaAuthentication(authenticationModel *StreamsKafkaAuthentication) *admin20231115014.StreamsKafkaAuthentication { +func newStreamsKafkaAuthentication(authenticationModel *StreamsKafkaAuthentication) *admin.StreamsKafkaAuthentication { if authenticationModel == nil { return nil } - return &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: authenticationModel.Mechanism, - Password: authenticationModel.Password, - Username: authenticationModel.Username, + return &admin.StreamsKafkaAuthentication{ + Mechanism: authenticationModel.Mechanism, + Method: authenticationModel.Method, + Username: authenticationModel.Username, + Password: authenticationModel.Password, + TokenEndpointUrl: authenticationModel.TokenEndpointUrl, + ClientId: authenticationModel.ClientId, + ClientSecret: authenticationModel.ClientSecret, + Scope: authenticationModel.Scope, + SaslOauthbearerExtensions: authenticationModel.SaslOauthbearerExtensions, } } diff --git a/cfn-resources/stream-connection/cmd/resource/mappings_test.go b/cfn-resources/stream-connection/cmd/resource/mappings_test.go index 3bf65a700..f59a3ec42 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings_test.go @@ -1,4 +1,4 @@ -// Copyright 2024 MongoDB Inc +// Copyright 2026 MongoDB Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -17,294 +17,125 @@ package resource_test import ( "testing" - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312010/admin" "github.com/aws/smithy-go/ptr" "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-connection/cmd/resource" "github.com/stretchr/testify/assert" ) -func TestNewModelDBRoleToExecute(t *testing.T) { - tests := []struct { - input *admin20231115014.DBRoleToExecute - expected *resource.DBRoleToExecute - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("readWrite"), - Type: ptr.String("BUILT_IN"), - }, - expected: &resource.DBRoleToExecute{ - Role: ptr.String("readWrite"), - Type: ptr.String("BUILT_IN"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelDBRoleToExecute(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} +const ( + testRoleValue = "readWrite" + testRoleTypeValue = "BUILT_IN" + testUsername = "testuser111" + testMechanism = "PLAIN" + testProtocol = "SSL" + testCert = "testcert" + testCustomRole = "customroleadmin" + testCustomType = "CUSTOM" + testConnection = "TestConnection" + testCluster = "TestCluster" + testBootstrap = "local.example.com:9192" + testUser = "user1" + testSampleName = "sample_stream_solar" +) -func TestNewModelAuthentication(t *testing.T) { - tests := []struct { - input *admin20231115014.StreamsKafkaAuthentication - expected *resource.StreamsKafkaAuthentication - name string +func TestMappings(t *testing.T) { + testCases := map[string]struct { + testFunc func(*testing.T) }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("testuser111"), - Password: ptr.String("testpassword"), + "NewModelDBRoleToExecute": { + 
testFunc: func(t *testing.T) { + t.Helper() + input := &admin.DBRoleToExecute{Role: ptr.String(testRoleValue), Type: ptr.String(testRoleTypeValue)} + result := resource.NewModelDBRoleToExecute(input) + assert.Equal(t, testRoleValue, *result.Role) + assert.Equal(t, testRoleTypeValue, *result.Type) + assert.Nil(t, resource.NewModelDBRoleToExecute(nil)) }, - expected: &resource.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("testuser111"), - Password: ptr.String("testpassword"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelAuthentication(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestNewModelSecurity(t *testing.T) { - tests := []struct { - input *admin20231115014.StreamsKafkaSecurity - expected *resource.StreamsKafkaSecurity - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, }, - { - name: "Valid Input", - input: &admin20231115014.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("testcert"), - Protocol: ptr.String("SSL"), - }, - expected: &resource.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("testcert"), - Protocol: ptr.String("SSL"), + "NewModelAuthentication": { + testFunc: func(t *testing.T) { + t.Helper() + input := &admin.StreamsKafkaAuthentication{ + Mechanism: ptr.String(testMechanism), Username: ptr.String(testUsername), + Password: ptr.String("test-password-placeholder"), + } + result := resource.NewModelAuthentication(input, nil) + assert.Equal(t, testMechanism, *result.Mechanism) + assert.Equal(t, testUsername, *result.Username) + assert.Nil(t, result.Password) + assert.Nil(t, resource.NewModelAuthentication(nil, nil)) }, }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelSecurity(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestNewDBRoleToExecute(t *testing.T) { - tests := []struct { - input 
*resource.DBRoleToExecute - expected *admin20231115014.DBRoleToExecute - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &resource.DBRoleToExecute{ - Role: ptr.String("customroleadmin"), - Type: ptr.String("CUSTOM"), - }, - expected: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("customroleadmin"), - Type: ptr.String("CUSTOM"), + "NewModelSecurity": { + testFunc: func(t *testing.T) { + t.Helper() + input := &admin.StreamsKafkaSecurity{ + BrokerPublicCertificate: ptr.String(testCert), Protocol: ptr.String(testProtocol), + } + result := resource.NewModelSecurity(input) + assert.Equal(t, testCert, *result.BrokerPublicCertificate) + assert.Equal(t, testProtocol, *result.Protocol) + assert.Nil(t, resource.NewModelSecurity(nil)) }, }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewDBRoleToExecute(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestGetStreamConnectionKafkaTypeModel(t *testing.T) { - streamsConnKafka := &admin20231115014.StreamsConnection{ - Name: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - BootstrapServers: ptr.String("local.example.com:9192"), - Authentication: &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("user1"), - Password: ptr.String("passwrd"), - }, - Security: &admin20231115014.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("cert1"), - Protocol: ptr.String("SSL"), - }, - Config: &map[string]string{"retention.test": "60000"}, - } - - t.Run("With Nil Current Model", func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnKafka, nil) - - assert.NotNil(t, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, *streamsConnKafka.BootstrapServers, *result.BootstrapServers) - assert.Equal(t, 
*streamsConnKafka.Authentication.Mechanism, *result.Authentication.Mechanism) - assert.Equal(t, *streamsConnKafka.Security.Protocol, *result.Security.Protocol) - assert.Equal(t, map[string]string{"retention.test": "60000"}, result.Config) - }) - - t.Run("With Non-Null Current Model", func(t *testing.T) { - currentModel := &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - BootstrapServers: ptr.String("local.example.com:9192"), - Authentication: &resource.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("user1"), - Password: ptr.String("passwrd"), + "NewDBRoleToExecute": { + testFunc: func(t *testing.T) { + t.Helper() + input := &resource.DBRoleToExecute{Role: ptr.String(testCustomRole), Type: ptr.String(testCustomType)} + result := resource.NewDBRoleToExecute(input) + assert.Equal(t, testCustomRole, *result.Role) + assert.Equal(t, testCustomType, *result.Type) + assert.Nil(t, resource.NewDBRoleToExecute(nil)) }, - Security: &resource.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("cert1"), - Protocol: ptr.String("SSL"), - }, - Config: map[string]string{"retention.test": "60000"}, - } - result := resource.GetStreamConnectionModel(streamsConnKafka, currentModel) - - assert.Equal(t, currentModel, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *currentModel.InstanceName, *result.InstanceName) - assert.Equal(t, *currentModel.Profile, *result.Profile) - assert.Equal(t, *currentModel.ProjectId, *result.ProjectId) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, *streamsConnKafka.BootstrapServers, *result.BootstrapServers) - assert.Equal(t, *streamsConnKafka.Authentication.Mechanism, *result.Authentication.Mechanism) - assert.Equal(t, *streamsConnKafka.Security.Protocol, 
*result.Security.Protocol) - }) -} - -func TestGetStreamConnectionClusterTypeModel(t *testing.T) { - streamsConnKafka := &admin20231115014.StreamsConnection{ - Name: ptr.String("TestConnection"), - Type: ptr.String("Cluster"), - ClusterName: ptr.String("TestCluster"), - DbRoleToExecute: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("admin"), - Type: ptr.String("Custom"), }, - } - - t.Run("With Nil Current Model", func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnKafka, nil) - - assert.NotNil(t, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetRole(), *result.DbRoleToExecute.Role) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetType(), *result.DbRoleToExecute.Type) - }) - - t.Run("With Non-Null Current Model", func(t *testing.T) { - currentModel := &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - ClusterName: ptr.String("TestCluster"), - DbRoleToExecute: &resource.DBRoleToExecute{ - Role: ptr.String("admin"), - Type: ptr.String("Custom"), - }, - } - result := resource.GetStreamConnectionModel(streamsConnKafka, currentModel) - - assert.Equal(t, currentModel, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *currentModel.InstanceName, *result.InstanceName) - assert.Equal(t, *currentModel.Profile, *result.Profile) - assert.Equal(t, *currentModel.ProjectId, *result.ProjectId) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetRole(), *result.DbRoleToExecute.Role) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetType(), *result.DbRoleToExecute.Type) - }) -} - -func TestGetStreamConnectionSampleTypeModel(t *testing.T) { - 
streamsConnSample := &admin20231115014.StreamsConnection{ - Name: ptr.String("sample_stream_solar"), - Type: ptr.String("Sample"), - } - testCases := []struct { - model *resource.Model - asserter func(input, result *resource.Model, a *assert.Assertions) - name string - }{ - { - name: "With Nil Current Model", - model: nil, - asserter: func(_, result *resource.Model, a *assert.Assertions) { - a.NotNil(result) - a.Equal(*streamsConnSample.Name, *result.ConnectionName) - a.Equal(*streamsConnSample.Type, *result.Type) - a.Nil(result.DbRoleToExecute) + "GetStreamConnectionModel_kafka": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.KafkaConnectionType), + BootstrapServers: ptr.String(testBootstrap), + Authentication: &admin.StreamsKafkaAuthentication{ + Mechanism: ptr.String(testMechanism), Username: ptr.String(testUser), + }, + Security: &admin.StreamsKafkaSecurity{Protocol: ptr.String(testProtocol)}, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.KafkaConnectionType, *result.Type) + assert.Equal(t, testBootstrap, *result.BootstrapServers) }, }, - { - name: "Sample Stream Solar dataset", - model: &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("sample_stream_solar"), - Type: ptr.String("Sample"), + "GetStreamConnectionModel_cluster": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.ClusterConnectionType), + ClusterName: ptr.String(testCluster), + DbRoleToExecute: &admin.DBRoleToExecute{Role: ptr.String("admin"), Type: ptr.String("Custom")}, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, 
*result.ConnectionName) + assert.Equal(t, resource.ClusterConnectionType, *result.Type) + assert.Equal(t, "admin", *result.DbRoleToExecute.Role) }, - asserter: func(input, result *resource.Model, a *assert.Assertions) { - a.Equal(*input.InstanceName, *result.InstanceName) - a.Equal(*input.Profile, *result.Profile) - a.Equal(*input.ProjectId, *result.ProjectId) - a.Equal(*input.ConnectionName, *result.ConnectionName) - a.Equal(*input.Type, *result.Type) + }, + "GetStreamConnectionModel_sample": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testSampleName), Type: ptr.String("Sample"), + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testSampleName, *result.ConnectionName) + assert.Equal(t, "Sample", *result.Type) + assert.Nil(t, result.DbRoleToExecute) }, }, } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnSample, tc.model) - tc.asserter(tc.model, result, assert.New(t)) - }) + + for name, tc := range testCases { + t.Run(name, tc.testFunc) } } diff --git a/cfn-resources/stream-connection/cmd/resource/model.go b/cfn-resources/stream-connection/cmd/resource/model.go index 0fe7b9839..6c30646f5 100644 --- a/cfn-resources/stream-connection/cmd/resource/model.go +++ b/cfn-resources/stream-connection/cmd/resource/model.go @@ -8,13 +8,19 @@ type Model struct { Profile *string `json:",omitempty"` ConnectionName *string `json:",omitempty"` InstanceName *string `json:",omitempty"` + WorkspaceName *string `json:",omitempty"` Type *string `json:",omitempty"` ClusterName *string `json:",omitempty"` + ClusterProjectId *string `json:",omitempty"` DbRoleToExecute *DBRoleToExecute `json:",omitempty"` Authentication *StreamsKafkaAuthentication `json:",omitempty"` BootstrapServers *string `json:",omitempty"` Security *StreamsKafkaSecurity `json:",omitempty"` Config map[string]string `json:",omitempty"` + 
Networking *Networking `json:",omitempty"` + Aws *Aws `json:",omitempty"` + Url *string `json:",omitempty"` + Headers map[string]string `json:",omitempty"` } // DBRoleToExecute is autogenerated from the json schema @@ -25,9 +31,15 @@ type DBRoleToExecute struct { // StreamsKafkaAuthentication is autogenerated from the json schema type StreamsKafkaAuthentication struct { - Mechanism *string `json:",omitempty"` - Username *string `json:",omitempty"` - Password *string `json:",omitempty"` + Mechanism *string `json:",omitempty"` + Method *string `json:",omitempty"` + Username *string `json:",omitempty"` + Password *string `json:",omitempty"` + TokenEndpointUrl *string `json:",omitempty"` + ClientId *string `json:",omitempty"` + ClientSecret *string `json:",omitempty"` + Scope *string `json:",omitempty"` + SaslOauthbearerExtensions *string `json:",omitempty"` } // StreamsKafkaSecurity is autogenerated from the json schema @@ -35,3 +47,19 @@ type StreamsKafkaSecurity struct { BrokerPublicCertificate *string `json:",omitempty"` Protocol *string `json:",omitempty"` } + +// Networking is autogenerated from the json schema +type Networking struct { + Access *Access `json:",omitempty"` +} + +// Access is autogenerated from the json schema +type Access struct { + Type *string `json:",omitempty"` + ConnectionId *string `json:",omitempty"` +} + +// Aws is autogenerated from the json schema +type Aws struct { + RoleArn *string `json:",omitempty"` +} diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index e72b42332..360fd07c2 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -19,9 +19,10 @@ import ( "fmt" "net/http" - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312010/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + 
"github.com/aws/aws-sdk-go-v2/service/cloudformation/types" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" @@ -32,15 +33,42 @@ import ( const ( ClusterConnectionType = "Cluster" KafkaConnectionType = "Kafka" + AWSLambdaType = "AWSLambda" + HTTPSType = "Https" ) -var CreateRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName, constants.Type} -var ReadRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName} -var UpdateRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName, constants.Type} -var DeleteRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName} -var ListRequiredFields = []string{constants.ProjectID, constants.InstanceName} +var CreateRequiredFields = []string{constants.ProjectID, constants.ConnectionName, constants.Type} +var ReadRequiredFields = []string{constants.ProjectID, constants.ConnectionName} +var UpdateRequiredFields = []string{constants.ProjectID, constants.ConnectionName, constants.Type} +var DeleteRequiredFields = []string{constants.ProjectID, constants.ConnectionName} +var ListRequiredFields = []string{constants.ProjectID} -func initEnvWithLatestClient(req handler.Request, currentModel *Model, requiredFields []string) (*admin20231115014.APIClient, *handler.ProgressEvent) { +func getWorkspaceOrInstanceName(model *Model) (*string, *handler.ProgressEvent) { + if model.WorkspaceName != nil && *model.WorkspaceName != "" { + return model.WorkspaceName, nil + } + if model.InstanceName != nil && *model.InstanceName != "" { + return model.InstanceName, nil + } + return nil, &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Either WorkspaceName or InstanceName must be provided", + HandlerErrorCode: string(types.HandlerErrorCodeInvalidRequest), + } +} + +func 
normalizeWorkspaceName(model *Model) { + if model != nil { + if model.WorkspaceName != nil && *model.WorkspaceName != "" { + return + } + if model.InstanceName != nil && *model.InstanceName != "" { + model.WorkspaceName = model.InstanceName + } + } +} + +var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { util.SetupLogger("mongodb-atlas-stream-connection") util.SetDefaultProfileIfNotDefined(¤tModel.Profile) @@ -49,15 +77,22 @@ func initEnvWithLatestClient(req handler.Request, currentModel *Model, requiredF return nil, errEvent } + normalizeWorkspaceName(currentModel) + client, peErr := util.NewAtlasClient(&req, currentModel.Profile) if peErr != nil { return nil, peErr } - return client.Atlas20231115014, nil + return client.AtlasSDK, nil } func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, CreateRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, CreateRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) if peErr != nil { return *peErr, nil } @@ -65,10 +100,9 @@ func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName streamConnectionReq := newStreamConnectionReq(currentModel) - streamConnResp, apiResp, err := conn.StreamsApi.CreateStreamConnection(ctx, *projectID, *instanceName, streamConnectionReq).Execute() + streamConnResp, apiResp, err := conn.StreamsApi.CreateStreamConnection(ctx, *projectID, *workspaceOrInstanceName, streamConnectionReq).Execute() if err != nil { return handleError(apiResp, constants.CREATE, err) } @@ -83,15 +117,19 @@ func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler 
} func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, ReadRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, ReadRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) if peErr != nil { return *peErr, nil } projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName - streamConnResp, apiResp, err := conn.StreamsApi.GetStreamConnection(context.Background(), *projectID, *instanceName, *connectionName).Execute() + streamConnResp, apiResp, err := conn.StreamsApi.GetStreamConnection(context.Background(), *projectID, *workspaceOrInstanceName, *connectionName).Execute() if err != nil { return handleError(apiResp, constants.READ, err) } @@ -105,7 +143,12 @@ func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.P } func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, UpdateRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, UpdateRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) if peErr != nil { return *peErr, nil } @@ -113,10 +156,9 @@ func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName streamConnectionReq := newStreamConnectionReq(currentModel) - streamConnResp, apiResp, err := conn.StreamsApi.UpdateStreamConnection(ctx, *projectID, *instanceName, *connectionName, streamConnectionReq).Execute() + streamConnResp, apiResp, err := 
conn.StreamsApi.UpdateStreamConnection(ctx, *projectID, *workspaceOrInstanceName, *connectionName, streamConnectionReq).Execute() if err != nil { return handleError(apiResp, constants.UPDATE, err) } @@ -131,7 +173,12 @@ func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler } func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, DeleteRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, DeleteRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) if peErr != nil { return *peErr, nil } @@ -139,9 +186,9 @@ func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName - if _, apiResp, err := conn.StreamsApi.DeleteStreamConnection(ctx, *projectID, *instanceName, *connectionName).Execute(); err != nil { + apiResp, err := conn.StreamsApi.DeleteStreamConnection(ctx, *projectID, *workspaceOrInstanceName, *connectionName).Execute() + if err != nil { return handleError(apiResp, constants.DELETE, err) } @@ -152,7 +199,12 @@ func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler } func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, ListRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, ListRequiredFields) + if peErr != nil { + return *peErr, nil + } + + workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) if peErr != nil { return *peErr, nil } @@ -160,9 +212,8 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P ctx := context.Background() projectID := 
currentModel.ProjectId - instanceName := currentModel.InstanceName - accumulatedStreamConns, apiResp, err := getAllStreamConnections(ctx, conn, *projectID, *instanceName) + accumulatedStreamConns, apiResp, err := getAllStreamConnections(ctx, conn, *projectID, *workspaceOrInstanceName) if err != nil { return handleError(apiResp, constants.LIST, err) } @@ -171,7 +222,11 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P for i := range accumulatedStreamConns { model := GetStreamConnectionModel(&accumulatedStreamConns[i], nil) model.ProjectId = currentModel.ProjectId - model.InstanceName = currentModel.InstanceName + if currentModel.WorkspaceName != nil { + model.WorkspaceName = currentModel.WorkspaceName + } else { + model.InstanceName = currentModel.InstanceName + } model.Profile = currentModel.Profile response = append(response, model) @@ -183,14 +238,14 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P }, nil } -func getAllStreamConnections(ctx context.Context, conn *admin20231115014.APIClient, projectID, instanceName string) ([]admin20231115014.StreamsConnection, *http.Response, error) { +func getAllStreamConnections(ctx context.Context, conn *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsConnection, *http.Response, error) { pageNum := 1 - accumulatedStreamConns := make([]admin20231115014.StreamsConnection, 0) + accumulatedStreamConns := make([]admin.StreamsConnection, 0) for allRecordsRetrieved := false; !allRecordsRetrieved; { - streamConns, apiResp, err := conn.StreamsApi.ListStreamConnectionsWithParams(ctx, &admin20231115014.ListStreamConnectionsApiParams{ + streamConns, apiResp, err := conn.StreamsApi.ListStreamConnectionsWithParams(ctx, &admin.ListStreamConnectionsApiParams{ GroupId: projectID, - TenantName: instanceName, + TenantName: workspaceOrInstanceName, ItemsPerPage: util.Pointer(constants.DefaultListItemsPerPage), PageNum: util.Pointer(pageNum), 
}).Execute() diff --git a/cfn-resources/stream-connection/cmd/resource/resource_test.go b/cfn-resources/stream-connection/cmd/resource/resource_test.go new file mode 100644 index 000000000..026c6d554 --- /dev/null +++ b/cfn-resources/stream-connection/cmd/resource/resource_test.go @@ -0,0 +1,392 @@ +// Copyright 2026 MongoDB Inc +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource_test + +import ( + "fmt" + "net/http" + "testing" + + "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" + "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-connection/cmd/resource" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util" + "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" +) + +const ( + testProjectID = "507f1f77bcf86cd799439011" + testConnectionName = "test-connection" + testWorkspaceName = "test-workspace" + testProfile = "default" + testClusterName = "test-cluster" + testRole = "atlasAdmin" + testRoleType = "BUILT_IN" + msgRequired = "required" + msgWorkspaceRequired = "Either WorkspaceName or InstanceName must be provided" +) + +func createTestClusterConnectionModel() *resource.Model { + return 
&resource.Model{ + Profile: util.StringPtr(testProfile), + ProjectId: util.StringPtr(testProjectID), + ConnectionName: util.StringPtr(testConnectionName), + WorkspaceName: util.StringPtr(testWorkspaceName), + Type: util.StringPtr(resource.ClusterConnectionType), + ClusterName: util.StringPtr(testClusterName), + DbRoleToExecute: &resource.DBRoleToExecute{ + Role: util.StringPtr(testRole), + Type: util.StringPtr(testRoleType), + }, + } +} + +func createTestStreamConnectionResponse(connType string) *admin.StreamsConnection { + name := testConnectionName + response := &admin.StreamsConnection{ + Name: &name, + Type: &connType, + } + if connType == resource.ClusterConnectionType { + response.ClusterName = util.StringPtr(testClusterName) + response.DbRoleToExecute = &admin.DBRoleToExecute{ + Role: admin.PtrString(testRole), + Type: admin.PtrString(testRoleType), + } + } + return response +} + +func TestConstants(t *testing.T) { + assert.Equal(t, "Cluster", resource.ClusterConnectionType) + assert.Equal(t, "Kafka", resource.KafkaConnectionType) + assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName, constants.Type}, resource.CreateRequiredFields) + assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName}, resource.ReadRequiredFields) + assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName, constants.Type}, resource.UpdateRequiredFields) + assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName}, resource.DeleteRequiredFields) + assert.Equal(t, []string{constants.ProjectID}, resource.ListRequiredFields) +} + +func TestValidationErrors(t *testing.T) { + testCases := map[string]struct { + operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) + currentModel *resource.Model + expectedMsg string + }{ + "Create_missingProjectId": { + operation: resource.Create, + currentModel: &resource.Model{ + Profile: util.StringPtr(testProfile), ConnectionName: 
util.StringPtr(testConnectionName), + WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), + }, + expectedMsg: msgRequired, + }, + "Create_missingConnectionName": { + operation: resource.Create, + currentModel: &resource.Model{ + Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), + WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), + }, + expectedMsg: msgRequired, + }, + "Create_missingWorkspaceOrInstanceName": { + operation: resource.Create, + currentModel: &resource.Model{ + Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), + ConnectionName: util.StringPtr(testConnectionName), Type: util.StringPtr(resource.ClusterConnectionType), + }, + expectedMsg: msgWorkspaceRequired, + }, + "Read_missingProjectId": { + operation: resource.Read, + currentModel: &resource.Model{ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName)}, + expectedMsg: msgRequired, + }, + "Read_missingWorkspaceOrInstanceName": { + operation: resource.Read, + currentModel: &resource.Model{ProjectId: util.StringPtr(testProjectID), ConnectionName: util.StringPtr(testConnectionName)}, + expectedMsg: msgWorkspaceRequired, + }, + "Update_missingProjectId": { + operation: resource.Update, + currentModel: &resource.Model{ + Profile: util.StringPtr(testProfile), ConnectionName: util.StringPtr(testConnectionName), + WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), + }, + expectedMsg: msgRequired, + }, + "Delete_missingProjectId": { + operation: resource.Delete, + currentModel: &resource.Model{ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName)}, + expectedMsg: msgRequired, + }, + "List_missingProjectId": { + operation: resource.List, + currentModel: &resource.Model{WorkspaceName: 
util.StringPtr(testWorkspaceName)}, + expectedMsg: msgRequired, + }, + "List_missingWorkspaceOrInstanceName": { + operation: resource.List, + currentModel: &resource.Model{ProjectId: util.StringPtr(testProjectID)}, + expectedMsg: msgWorkspaceRequired, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + needsMock := name == "Create_missingWorkspaceOrInstanceName" || + name == "Read_missingWorkspaceOrInstanceName" || + name == "List_missingWorkspaceOrInstanceName" + if needsMock { + cleanup := setupMockClient(t, func(*mockadmin.StreamsApi) {}) + defer cleanup() + } + event, err := tc.operation(handler.Request{}, nil, tc.currentModel) + require.NoError(t, err) + assert.Equal(t, handler.Failed, event.OperationStatus) + assert.Contains(t, event.Message, tc.expectedMsg) + }) + } +} + +func setupMockClient(t *testing.T, mockSetup func(*mockadmin.StreamsApi)) func() { + t.Helper() + originalInitEnv := resource.InitEnvWithLatestClient + mockStreamsAPI := mockadmin.NewStreamsApi(t) + mockSetup(mockStreamsAPI) + mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} + resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { + return mockClient, nil + } + return func() { resource.InitEnvWithLatestClient = originalInitEnv } +} + +func TestCRUDOperations(t *testing.T) { + testCases := map[string]struct { + operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) + model *resource.Model + mockSetup func(*mockadmin.StreamsApi) + validateResult func(t *testing.T, event handler.ProgressEvent) + expectedStatus handler.Status + }{ + "Create_success": { + operation: resource.Create, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + response := createTestStreamConnectionResponse(resource.ClusterConnectionType) + m.EXPECT().CreateStreamConnection(mock.Anything, 
mock.Anything, mock.Anything, mock.Anything). + Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().CreateStreamConnectionExecute(mock.Anything). + Return(response, &http.Response{StatusCode: 201}, nil) + }, + expectedStatus: handler.Success, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, "Create Completed", event.Message) + assert.NotNil(t, event.ResourceModel) + }, + }, + "Create_kafka": { + operation: resource.Create, + model: &resource.Model{ + Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), + ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName), + Type: util.StringPtr(resource.KafkaConnectionType), BootstrapServers: util.StringPtr("broker1:9092"), + Security: &resource.StreamsKafkaSecurity{Protocol: util.StringPtr("SSL")}, + Authentication: &resource.StreamsKafkaAuthentication{Mechanism: util.StringPtr("PLAIN")}, + }, + mockSetup: func(m *mockadmin.StreamsApi) { + bootstrap := "broker1:9092" + response := &admin.StreamsConnection{ + Name: util.StringPtr(testConnectionName), Type: util.StringPtr(resource.KafkaConnectionType), + BootstrapServers: &bootstrap, + } + m.EXPECT().CreateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().CreateStreamConnectionExecute(mock.Anything). + Return(response, &http.Response{StatusCode: 201}, nil) + }, + expectedStatus: handler.Success, + }, + "Create_apiError": { + operation: resource.Create, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().CreateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().CreateStreamConnectionExecute(mock.Anything). 
+ Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) + }, + expectedStatus: handler.Failed, + }, + "Read_success": { + operation: resource.Read, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + response := createTestStreamConnectionResponse(resource.ClusterConnectionType) + m.EXPECT().GetStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.GetStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().GetStreamConnectionExecute(mock.Anything). + Return(response, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Read_notFound": { + operation: resource.Read, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().GetStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.GetStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().GetStreamConnectionExecute(mock.Anything). + Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + }, + "Update_success": { + operation: resource.Update, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + response := createTestStreamConnectionResponse(resource.ClusterConnectionType) + m.EXPECT().UpdateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.UpdateStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().UpdateStreamConnectionExecute(mock.Anything). + Return(response, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + }, + "Update_apiError": { + operation: resource.Update, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().UpdateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
+ Return(admin.UpdateStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().UpdateStreamConnectionExecute(mock.Anything). + Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) + }, + expectedStatus: handler.Failed, + }, + "Delete_success": { + operation: resource.Delete, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().DeleteStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.DeleteStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().DeleteStreamConnectionExecute(mock.Anything). + Return(&http.Response{StatusCode: 204}, nil) + }, + expectedStatus: handler.Success, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, "Delete Completed", event.Message) + assert.Nil(t, event.ResourceModel) + }, + }, + "Delete_notFound": { + operation: resource.Delete, + model: createTestClusterConnectionModel(), + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().DeleteStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(admin.DeleteStreamConnectionApiRequest{ApiService: m}) + m.EXPECT().DeleteStreamConnectionExecute(mock.Anything). 
+ Return(&http.Response{StatusCode: 404}, fmt.Errorf("not found")) + }, + expectedStatus: handler.Failed, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Equal(t, string(types.HandlerErrorCodeNotFound), event.HandlerErrorCode) + }, + }, + "List_success": { + operation: resource.List, + model: &resource.Model{ + ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), + Profile: util.StringPtr(testProfile), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + connections := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} + totalCount := 1 + response := &admin.PaginatedApiStreamsConnection{ + Results: &connections, TotalCount: &totalCount, + } + m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). + Return(admin.ListStreamConnectionsApiRequest{ApiService: m}) + m.EXPECT().ListStreamConnectionsExecute(mock.Anything). + Return(response, &http.Response{StatusCode: 200}, nil) + }, + expectedStatus: handler.Success, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.Len(t, event.ResourceModels, 1) + }, + }, + "List_withPagination": { + operation: resource.List, + model: &resource.Model{ + ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), + Profile: util.StringPtr(testProfile), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + conn1 := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} + conn2 := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} + totalCount := 2 + m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). + Return(admin.ListStreamConnectionsApiRequest{ApiService: m}).Times(2) + m.EXPECT().ListStreamConnectionsExecute(mock.Anything). 
+ Return(&admin.PaginatedApiStreamsConnection{Results: &conn1, TotalCount: &totalCount}, + &http.Response{StatusCode: 200}, nil).Once() + m.EXPECT().ListStreamConnectionsExecute(mock.Anything). + Return(&admin.PaginatedApiStreamsConnection{Results: &conn2, TotalCount: &totalCount}, + &http.Response{StatusCode: 200}, nil).Once() + }, + expectedStatus: handler.Success, + validateResult: func(t *testing.T, event handler.ProgressEvent) { + t.Helper() + assert.GreaterOrEqual(t, len(event.ResourceModels), 2) + }, + }, + "List_apiError": { + operation: resource.List, + model: &resource.Model{ + ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), + Profile: util.StringPtr(testProfile), + }, + mockSetup: func(m *mockadmin.StreamsApi) { + m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). + Return(admin.ListStreamConnectionsApiRequest{ApiService: m}) + m.EXPECT().ListStreamConnectionsExecute(mock.Anything). + Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) + }, + expectedStatus: handler.Failed, + }, + } + + for name, tc := range testCases { + t.Run(name, func(t *testing.T) { + cleanup := setupMockClient(t, tc.mockSetup) + defer cleanup() + event, err := tc.operation(handler.Request{}, nil, tc.model) + require.NoError(t, err) + assert.Equal(t, tc.expectedStatus, event.OperationStatus) + if tc.validateResult != nil { + tc.validateResult(t, event) + } + }) + } +} diff --git a/cfn-resources/stream-connection/docs/README.md b/cfn-resources/stream-connection/docs/README.md index 4ea42432d..bb4308036 100644 --- a/cfn-resources/stream-connection/docs/README.md +++ b/cfn-resources/stream-connection/docs/README.md @@ -1,6 +1,6 @@ # MongoDB::Atlas::StreamConnection -Returns, adds, edits, and removes one connection for a stream instance in the specified project. To use this resource, the requesting API Key must have the Project Owner roles. 
+Returns, adds, edits, and removes one connection for a stream workspace in the specified project. To use this resource, the requesting API Key must have the Project Owner roles. ## Syntax @@ -16,13 +16,19 @@ To declare this entity in your AWS CloudFormation template, use the following sy "Profile" : String, "ConnectionName" : String, "InstanceName" : String, + "WorkspaceName" : String, "Type" : String, "ClusterName" : String, + "ClusterProjectId" : String, "DbRoleToExecute" : DBRoleToExecute, "Authentication" : StreamsKafkaAuthentication, "BootstrapServers" : String, "Security" : StreamsKafkaSecurity, - "Config" : Config + "Config" : Config, + "Networking" : Networking, + "Aws" : Aws, + "Url" : String, + "Headers" : Headers } } @@ -36,13 +42,19 @@ Properties: Profile: String ConnectionName: String InstanceName: String + WorkspaceName: String Type: String ClusterName: String + ClusterProjectId: String DbRoleToExecute: DBRoleToExecute Authentication: StreamsKafkaAuthentication BootstrapServers: String Security: StreamsKafkaSecurity Config: Config + Networking: Networking + Aws: Aws + Url: String + Headers: Headers ## Properties @@ -87,9 +99,19 @@ _Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/l #### InstanceName -Human-readable label that identifies the stream instance. +Human-readable label that identifies the stream instance. Deprecated: Use WorkspaceName instead. -_Required_: Yes +_Required_: No + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### WorkspaceName + +Human-readable label that identifies the stream workspace. + +_Required_: No _Type_: String @@ -97,15 +119,15 @@ _Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/l #### Type -Type of the connection. Can be either Cluster, Kafka, or Sample. +Type of the connection. 
Can be Cluster, Kafka, Sample, AWSLambda, or Https. _Required_: Yes _Type_: String -_Allowed Values_: Kafka | Cluster | Sample +_Allowed Values_: Kafka | Cluster | Sample | AWSLambda | Https -_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) #### ClusterName @@ -117,6 +139,22 @@ _Type_: String _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### ClusterProjectId + +Unique 24-hexadecimal digit string that identifies the project containing the cluster for cross-project cluster connections. + +_Required_: No + +_Type_: String + +_Minimum Length_: 24 + +_Maximum Length_: 24 + +_Pattern_: ^([a-f0-9]{24})$ + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + #### DbRoleToExecute The name of a Built in or Custom DB Role to connect to an Atlas Cluster. @@ -167,3 +205,43 @@ _Type_: Config _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### Networking + +Networking configuration for AWS PrivateLink connections. + +_Required_: No + +_Type_: Networking + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Aws + +AWS Lambda connection configuration. 
+ +_Required_: No + +_Type_: Aws + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Url + +URL endpoint for HTTPS type connections. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Headers + +HTTP headers for HTTPS type connections. + +_Required_: No + +_Type_: Headers + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/aws.md b/cfn-resources/stream-connection/docs/aws.md new file mode 100644 index 000000000..66007a6b8 --- /dev/null +++ b/cfn-resources/stream-connection/docs/aws.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamConnection Aws + +AWS Lambda connection configuration. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "RoleArn" : String
+}
+
+ +### YAML + +
+RoleArn: String
+
+ +## Properties + +#### RoleArn + +Amazon Resource Name (ARN) of the IAM role for AWS Lambda connection. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/headers.md b/cfn-resources/stream-connection/docs/headers.md new file mode 100644 index 000000000..24328b6bb --- /dev/null +++ b/cfn-resources/stream-connection/docs/headers.md @@ -0,0 +1,32 @@ +# MongoDB::Atlas::StreamConnection Headers + +HTTP headers for HTTPS type connections. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "^[A-Za-z0-9-]+$" : String
+}
+
+ +### YAML + +
+^[A-Za-z0-9-]+$: String
+
+ +## Properties + +#### \^[A-Za-z0-9-]+$ + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/networking.md b/cfn-resources/stream-connection/docs/networking.md new file mode 100644 index 000000000..2be60063a --- /dev/null +++ b/cfn-resources/stream-connection/docs/networking.md @@ -0,0 +1,32 @@ +# MongoDB::Atlas::StreamConnection Networking + +Networking configuration for AWS PrivateLink connections. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Access" : Networking
+}
+
+ +### YAML + +
+Access: Networking
+
+ +## Properties + +#### Access + +_Required_: Yes + +_Type_: Networking + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/streamskafkaauthentication.md b/cfn-resources/stream-connection/docs/streamskafkaauthentication.md index 960abc4f9..2c4b6ce62 100644 --- a/cfn-resources/stream-connection/docs/streamskafkaauthentication.md +++ b/cfn-resources/stream-connection/docs/streamskafkaauthentication.md @@ -11,8 +11,14 @@ To declare this entity in your AWS CloudFormation template, use the following sy
 {
     "Mechanism" : String,
+    "Method" : String,
     "Username" : String,
-    "Password" : String
+    "Password" : String,
+    "TokenEndpointUrl" : String,
+    "ClientId" : String,
+    "ClientSecret" : String,
+    "Scope" : String,
+    "SaslOauthbearerExtensions" : String
 }
 
@@ -20,15 +26,31 @@ To declare this entity in your AWS CloudFormation template, use the following sy
 Mechanism: String
+Method: String
 Username: String
 Password: String
+TokenEndpointUrl: String
+ClientId: String
+ClientSecret: String
+Scope: String
+SaslOauthbearerExtensions: String
 
## Properties #### Mechanism -Style of authentication. Can be one of PLAIN, SCRAM-256, or SCRAM-512. +Style of authentication. Can be one of PLAIN, SCRAM-256, SCRAM-512, or OAUTHBEARER. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Method + +OAuth authentication method. _Required_: No @@ -56,3 +78,53 @@ _Type_: String _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### TokenEndpointUrl + +OAuth token endpoint URL. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ClientId + +OAuth client ID. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ClientSecret + +OAuth client secret. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Scope + +OAuth scope. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### SaslOauthbearerExtensions + +SASL OAuth bearer extensions. 
+ +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json index c6a724f64..d1da0de12 100644 --- a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json +++ b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json @@ -1,7 +1,7 @@ { "typeName": "MongoDB::Atlas::StreamConnection", "additionalProperties": false, - "description": "Returns, adds, edits, and removes one connection for a stream instance in the specified project. To use this resource, the requesting API Key must have the Project Owner roles.", + "description": "Returns, adds, edits, and removes one connection for a stream workspace in the specified project. To use this resource, the requesting API Key must have the Project Owner roles.", "sourceUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources", "documentationUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/cfn-resources/stream-connection/README.md", "definitions": { @@ -30,7 +30,11 @@ "properties": { "Mechanism": { "type": "string", - "description": "Style of authentication. Can be one of PLAIN, SCRAM-256, or SCRAM-512." + "description": "Style of authentication. Can be one of PLAIN, SCRAM-256, SCRAM-512, or OAUTHBEARER." + }, + "Method": { + "type": "string", + "description": "OAuth authentication method." }, "Username": { "type": "string", @@ -40,6 +44,27 @@ "type": "string", "format": "password", "description": "Password of the account to connect to the Kafka cluster. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials." 
+ }, + "TokenEndpointUrl": { + "type": "string", + "description": "OAuth token endpoint URL." + }, + "ClientId": { + "type": "string", + "description": "OAuth client ID." + }, + "ClientSecret": { + "type": "string", + "format": "password", + "description": "OAuth client secret. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials." + }, + "Scope": { + "type": "string", + "description": "OAuth scope." + }, + "SaslOauthbearerExtensions": { + "type": "string", + "description": "SASL OAuth bearer extensions." } }, "additionalProperties": false @@ -68,6 +93,52 @@ } }, "additionalProperties": false + }, + "Networking": { + "type": "object", + "description": "Networking configuration for AWS PrivateLink connections.", + "properties": { + "Access": { + "type": "object", + "description": "Network access configuration.", + "properties": { + "Type": { + "type": "string", + "description": "Type of network access. PRIVATE_ENDPOINT for AWS PrivateLink.", + "enum": [ + "PRIVATE_ENDPOINT", + "PUBLIC" + ] + }, + "ConnectionId": { + "type": "string", + "description": "Unique identifier of the AWS PrivateLink connection." + } + }, + "required": [ + "Type" + ], + "additionalProperties": false + } + }, + "required": [ + "Access" + ], + "additionalProperties": false + }, + "Aws": { + "type": "object", + "description": "AWS Lambda connection configuration.", + "properties": { + "RoleArn": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the IAM role for AWS Lambda connection." + } + }, + "required": [ + "RoleArn" + ], + "additionalProperties": false } }, "properties": { @@ -89,21 +160,34 @@ }, "InstanceName": { "type": "string", - "description": "Human-readable label that identifies the stream instance." + "description": "Human-readable label that identifies the stream instance. Deprecated: Use WorkspaceName instead." 
+ }, + "WorkspaceName": { + "type": "string", + "description": "Human-readable label that identifies the stream workspace." }, "Type": { "type": "string", - "description": "Type of the connection. Can be either Cluster, Kafka, or Sample.", + "description": "Type of the connection. Can be Cluster, Kafka, Sample, AWSLambda, or Https.", "enum": [ "Kafka", "Cluster", - "Sample" + "Sample", + "AWSLambda", + "Https" ] }, "ClusterName": { "type": "string", "description": "Name of the cluster configured for this connection." }, + "ClusterProjectId": { + "type": "string", + "description": "Unique 24-hexadecimal digit string that identifies the project containing the cluster for cross-project cluster connections.", + "maxLength": 24, + "minLength": 24, + "pattern": "^([a-f0-9]{24})$" + }, "DbRoleToExecute": { "$ref": "#/definitions/DBRoleToExecute" }, @@ -119,6 +203,26 @@ }, "Config": { "$ref": "#/definitions/Config" + }, + "Networking": { + "$ref": "#/definitions/Networking" + }, + "Aws": { + "$ref": "#/definitions/Aws" + }, + "Url": { + "type": "string", + "description": "URL endpoint for HTTPS type connections." 
+ }, + "Headers": { + "type": "object", + "description": "HTTP headers for HTTPS type connections.", + "patternProperties": { + "^[A-Za-z0-9-]+$": { + "type": "string" + } + }, + "additionalProperties": false } }, "handlers": { @@ -151,23 +255,25 @@ "primaryIdentifier": [ "/properties/ProjectId", "/properties/ConnectionName", - "/properties/InstanceName", + "/properties/WorkspaceName", "/properties/Profile" ], "required": [ "ProjectId", "ConnectionName", - "InstanceName", "Type" ], "createOnlyProperties": [ "/properties/ProjectId", "/properties/InstanceName", + "/properties/WorkspaceName", "/properties/ConnectionName", + "/properties/Type", "/properties/Profile" ], "writeOnlyProperties": [ - "/properties/Authentication/Password" + "/properties/Authentication/Password", + "/properties/Authentication/ClientSecret" ], "tagging": { "taggable": false diff --git a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh index c64cf16aa..7a3451421 100755 --- a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh @@ -18,73 +18,129 @@ fi projectName="${1:-$PROJECT_NAME}" echo "$projectName" -projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | select(.name==$NAME) | .id') -if [ -z "$projectId" ]; then - projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') - echo -e "Created project \"${projectName}\" with id: ${projectId}\n" +# Use existing project ID if set, otherwise try to find or create project +if [ -n "${MONGODB_ATLAS_PROJECT_ID:-}" ]; then + projectId="${MONGODB_ATLAS_PROJECT_ID}" + echo -e "Using existing project ID from MONGODB_ATLAS_PROJECT_ID: ${projectId}\n" else - echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" + projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | 
select(.name==$NAME) | .id') + if [ -z "$projectId" ]; then + projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') + echo -e "Created project \"${projectName}\" with id: ${projectId}\n" + else + echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" + fi fi echo -e "=====\nrun this command to clean up\n=====\nmongocli iam projects delete ${projectId} --force\n=====" -instanceName="stream-instance-$(date +%s)-$RANDOM" +workspaceName="stream-workspace-$(date +%s)-$RANDOM" cloudProvider="AWS" clusterName="cluster-$(date +%s)-$RANDOM" -atlas streams instances create "${instanceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} -echo -e "Created StreamInstance \"${instanceName}\"" + +atlas streams instances create "${workspaceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} +echo -e "Created StreamWorkspace \"${workspaceName}\"" atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --provider AWS --region US_EAST_1 --members 3 --tier M10 --diskSizeGB 10 --output=json atlas clusters watch "${clusterName}" --projectId "${projectId}" echo -e "Created Cluster \"${clusterName}\"" jq --arg cluster_name "$clusterName" \ - --arg instance_name "$instanceName" \ + --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ClusterName?|=$cluster_name | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_1_create.json" >"inputs/inputs_1_create.json" jq --arg cluster_name "$clusterName" \ - --arg instance_name "$instanceName" \ + --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ClusterName?|=$cluster_name | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_1_update.json" 
>"inputs/inputs_1_update.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_2_create.json" >"inputs/inputs_2_create.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_2_update.json" >"inputs/inputs_2_update.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_3_create.json" >"inputs/inputs_3_create.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_3_update.json" >"inputs/inputs_3_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_4_create.json" >"inputs/inputs_4_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_4_update.json" >"inputs/inputs_4_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg 
project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_5_create.json" >"inputs/inputs_5_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_5_update.json" >"inputs/inputs_5_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_6_create.json" >"inputs/inputs_6_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_6_update.json" >"inputs/inputs_6_update.json" + \ No newline at end of file diff --git a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh index 44f67754b..91289aef8 100755 --- a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh @@ -12,7 +12,20 @@ function usage { projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) clusterName=$(jq -r '.ClusterName' ./inputs/inputs_1_create.json) -instanceName=$(jq -r '.InstanceName' ./inputs/inputs_1_create.json) + +# Get workspace name or instance name (workspace name takes precedence) +workspaceName=$(jq -r '.WorkspaceName // empty' ./inputs/inputs_1_create.json) +instanceName=$(jq -r '.InstanceName // empty' ./inputs/inputs_1_create.json) + +# Use WorkspaceName if available, otherwise fall back to InstanceName +if [ -n "${workspaceName}" ] && [ "${workspaceName}" != "null" ] && [ 
"${workspaceName}" != "" ]; then + workspaceOrInstanceName="${workspaceName}" +elif [ -n "${instanceName}" ] && [ "${instanceName}" != "null" ] && [ "${instanceName}" != "" ]; then + workspaceOrInstanceName="${instanceName}" +else + echo "Error: Neither WorkspaceName nor InstanceName found in inputs_1_create.json" + exit 1 +fi if atlas cluster delete "${clusterName}" --projectId "${projectId}" --force; then echo "deleting cluster with name ${clusterName}" @@ -25,11 +38,11 @@ if [ "$status" -eq 0 ]; then echo "Cluster '${clusterName}' has been successfully watched until deletion." fi -#delete stream instance -if atlas streams instances delete "${instanceName}" --projectId "${projectId}" --force; then - echo "deleting stream instance with name ${instanceName}" +#delete stream workspace/instance (using instances delete for backward compatibility) +if atlas streams instances delete "${workspaceOrInstanceName}" --projectId "${projectId}" --force; then + echo "deleting stream workspace/instance with name ${workspaceOrInstanceName}" else - echo "failed to delete the stream instance with name ${instanceName}" + echo "failed to delete the stream workspace/instance with name ${workspaceOrInstanceName}" fi #delete project diff --git a/cfn-resources/stream-connection/test/inputs_1_create.json b/cfn-resources/stream-connection/test/inputs_1_create.json index cf5167243..11e56aae1 100644 --- a/cfn-resources/stream-connection/test/inputs_1_create.json +++ b/cfn-resources/stream-connection/test/inputs_1_create.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ClusterName": "", "ConnectionName": "ConnectionNameCluster", "Type": "Cluster", diff --git a/cfn-resources/stream-connection/test/inputs_1_update.json b/cfn-resources/stream-connection/test/inputs_1_update.json index c653114ae..285ca5635 100644 --- a/cfn-resources/stream-connection/test/inputs_1_update.json +++ b/cfn-resources/stream-connection/test/inputs_1_update.json 
@@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ClusterName": "", "ConnectionName": "ConnectionNameCluster", "Type": "Cluster", diff --git a/cfn-resources/stream-connection/test/inputs_2_create.json b/cfn-resources/stream-connection/test/inputs_2_create.json index c454e2279..f99459cbb 100644 --- a/cfn-resources/stream-connection/test/inputs_2_create.json +++ b/cfn-resources/stream-connection/test/inputs_2_create.json @@ -1,14 +1,14 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "ConnectionNameKafka", "Type": "Kafka", "BootstrapServers": "localhost:9092,localhost:9092", "Authentication": { "Mechanism": "PLAIN", "Username": "user1", - "Password": "rawpassword" + "Password": "" }, "Security": { "Protocol": "PLAINTEXT" diff --git a/cfn-resources/stream-connection/test/inputs_2_update.json b/cfn-resources/stream-connection/test/inputs_2_update.json index 19076817f..c76dea7f4 100644 --- a/cfn-resources/stream-connection/test/inputs_2_update.json +++ b/cfn-resources/stream-connection/test/inputs_2_update.json @@ -1,14 +1,14 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "ConnectionNameKafka", "Type": "Kafka", "BootstrapServers": "localhost:9092,localhost:9092", "Authentication": { "Mechanism": "PLAIN", "Username": "user1", - "Password": "rawpassword" + "Password": "" }, "Security": { "Protocol": "SSL", diff --git a/cfn-resources/stream-connection/test/inputs_3_create.json b/cfn-resources/stream-connection/test/inputs_3_create.json index 561899222..0ba44a39b 100644 --- a/cfn-resources/stream-connection/test/inputs_3_create.json +++ b/cfn-resources/stream-connection/test/inputs_3_create.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "sample_stream_solar", "Type": "Sample" } diff --git 
a/cfn-resources/stream-connection/test/inputs_3_update.json b/cfn-resources/stream-connection/test/inputs_3_update.json index 561899222..0ba44a39b 100644 --- a/cfn-resources/stream-connection/test/inputs_3_update.json +++ b/cfn-resources/stream-connection/test/inputs_3_update.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "sample_stream_solar", "Type": "Sample" } diff --git a/cfn-resources/stream-connection/test/inputs_4_create.json b/cfn-resources/stream-connection/test/inputs_4_create.json new file mode 100644 index 000000000..cc17a61f6 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_4_create.json @@ -0,0 +1,11 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameAWSLambda", + "Type": "AWSLambda", + "Aws": { + "RoleArn": "arn:aws:iam::263641576157:role/mongodb-atlas-streams-lambda-new" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_4_update.json b/cfn-resources/stream-connection/test/inputs_4_update.json new file mode 100644 index 000000000..08135e379 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_4_update.json @@ -0,0 +1,11 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameAWSLambda", + "Type": "AWSLambda", + "Aws": { + "RoleArn": "arn:aws:iam::263641576157:role/mongodb-atlas-streams-lambda-new-updated" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_5_create.json b/cfn-resources/stream-connection/test/inputs_5_create.json new file mode 100644 index 000000000..00c6b7be4 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_5_create.json @@ -0,0 +1,13 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameHttps", + "Type": "Https", + "Url": "https://api.example.com/webhook", + "Headers": { + "Content-Type": "application/json", + "Authorization": 
"Bearer token123" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_5_update.json b/cfn-resources/stream-connection/test/inputs_5_update.json new file mode 100644 index 000000000..fe4ed45f8 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_5_update.json @@ -0,0 +1,14 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameHttps", + "Type": "Https", + "Url": "https://api.example.com/webhook/v2", + "Headers": { + "Content-Type": "application/json", + "Authorization": "Bearer updated-token-456", + "X-API-Key": "new-api-key" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_6_create.json b/cfn-resources/stream-connection/test/inputs_6_create.json new file mode 100644 index 000000000..f402e8b6a --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_6_create.json @@ -0,0 +1,30 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameKafkaOAuth", + "Type": "Kafka", + "BootstrapServers": "pkc-example.us-east-1.aws.confluent.cloud:9092", + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": "https://oauth.example.com/oauth2/token", + "ClientId": "test-client-id-12345", + "ClientSecret": "", + "Scope": "read:messages write:messages", + "SaslOauthbearerExtensions": "logicalCluster=lkc-test,identityPoolId=pool-test" + }, + "Security": { + "Protocol": "SASL_SSL", + "BrokerPublicCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIENTCCAx2gAwIBAgIJAPWNjXbYMr7lMA0GCSqGSIb3DQEBCwUAMGwxCzAJBgNV\nBAYTAklFMQowCAYDVQQIDAFEMQ8wDQYDVQQHDAZEdWJsaW4xFTATBgNVBAoMDERP\nIE5PVCBUUlVTVDEVMBMGA1UECwwMRE8gTk9UIFRSVVNUMRIwEAYDVQQDDAlsb2Nh\nbGhvc3QwHhcNMjIwNDE5MTYxNDI5WhcNMjMwOTAxMTYxNDI5WjBsMQswCQYDVQQG\nEwJJRTEKMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBO\nT1QgVFJVU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxo\nb3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6tWJkTr99TuxWN2\nih7uXVIbjRCd1pLTvmoZxHee4TYbs7zwHCzanbTeqQ2LOZlrqHLwmJ9E+xrkDSsB\nmlDfI3J9f5dIBeEZAZDP9GcZ64KCLq4PgdQV0YLPiuwYyEuIPZrDkNY7weVqBpk9\noEf4HLktxHx+zbsp6/SxAMKCYBTcy8wioccdLI8lBLJeVOl/KsuxfkGILoH+ryl5\nqBdYGeZzGnOjU4cJVFOCvJ7zJDn2ASGghO7JbmKPotr/NeY0MXEKJR4zHIHyYvRh\nKit5V5bq3DJw5kp0TFkVpjhRaMaLkaP8w97bEvaOthV5fJB94WG44eEuYhuO/xyY\nh2SLEwIDAQABo4HZMIHWMIGGBgNVHSMEfzB9oXCkbjBsMQswCQYDVQQGEwJJRTEK\nMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBOT1QgVFJV\nU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxob3N0ggkA\n2D7GIAQ8CcgwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwEGCCsGAQUFBwMCMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0B\nAQsFAAOCAQEAgKINT8ASLnG/k/+H68iqoPfb49melXKtRiVG5jYlCN8P7v3Yj/AT\nm3Wbq/cGayd2sewh4UgvkmUWEuw6OCBsORT/E9+teq7G/XbWK6YGpc7WCzJT0kJD\n8sOK2LuRegPM7gEoIZ5KBycVBxB3mLkIyiOeFpCK+ZoW8gd9Ug2ZNK4YAyMDFfW9\nyJ7hJThLZmckaMZBY83yrSD3BTevLN22cWphj9Sna7BW+7c5Pqw3W9i4YO4wSmwU\nJ1FPS2VF0Pz5ORDNp5fgz2JVS4b3k2IQ0dEIXQW3OeBO1i7p+frUOroQFu8ZXLac\nromOggcaq3uWOek9yP+3XusUjXWJ3ZPPsA==\n-----END CERTIFICATE-----" + }, + "Config": { + "auto.offset.reset": "earliest" + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_6_update.json b/cfn-resources/stream-connection/test/inputs_6_update.json new file mode 100644 index 000000000..faae07728 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_6_update.json @@ -0,0 +1,30 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", 
+ "ConnectionName": "ConnectionNameKafkaOAuth", + "Type": "Kafka", + "BootstrapServers": "pkc-example.us-east-1.aws.confluent.cloud:9092", + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": "https://oauth.example.com/oauth2/token", + "ClientId": "test-client-id-updated", + "ClientSecret": "", + "Scope": "read:messages write:messages delete:messages", + "SaslOauthbearerExtensions": "logicalCluster=lkc-updated,identityPoolId=pool-updated" + }, + "Security": { + "Protocol": "SASL_SSL", + "BrokerPublicCertificate": "-----BEGIN CERTIFICATE-----\nMIIENTCCAx2gAwIBAgIJAPWNjXbYMr7lMA0GCSqGSIb3DQEBCwUAMGwxCzAJBgNV\nBAYTAklFMQowCAYDVQQIDAFEMQ8wDQYDVQQHDAZEdWJsaW4xFTATBgNVBAoMDERP\nIE5PVCBUUlVTVDEVMBMGA1UECwwMRE8gTk9UIFRSVVNUMRIwEAYDVQQDDAlsb2Nh\nbGhvc3QwHhcNMjIwNDE5MTYxNDI5WhcNMjMwOTAxMTYxNDI5WjBsMQswCQYDVQQG\nEwJJRTEKMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBO\nT1QgVFJVU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxo\nb3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6tWJkTr99TuxWN2\nih7uXVIbjRCd1pLTvmoZxHee4TYbs7zwHCzanbTeqQ2LOZlrqHLwmJ9E+xrkDSsB\nmlDfI3J9f5dIBeEZAZDP9GcZ64KCLq4PgdQV0YLPiuwYyEuIPZrDkNY7weVqBpk9\noEf4HLktxHx+zbsp6/SxAMKCYBTcy8wioccdLI8lBLJeVOl/KsuxfkGILoH+ryl5\nqBdYGeZzGnOjU4cJVFOCvJ7zJDn2ASGghO7JbmKPotr/NeY0MXEKJR4zHIHyYvRh\nKit5V5bq3DJw5kp0TFkVpjhRaMaLkaP8w97bEvaOthV5fJB94WG44eEuYhuO/xyY\nh2SLEwIDAQABo4HZMIHWMIGGBgNVHSMEfzB9oXCkbjBsMQswCQYDVQQGEwJJRTEK\nMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBOT1QgVFJV\nU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxob3N0ggkA\n2D7GIAQ8CcgwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwEGCCsGAQUFBwMCMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0B\nAQsFAAOCAQEAgKINT8ASLnG/k/+H68iqoPfb49melXKtRiVG5jYlCN8P7v3Yj/AT\nm3Wbq/cGayd2sewh4UgvkmUWEuw6OCBsORT/E9+teq7G/XbWK6YGpc7WCzJT0kJD\n8sOK2LuRegPM7gEoIZ5KBycVBxB3mLkIyiOeFpCK+ZoW8gd9Ug2ZNK4YAyMDFfW9\nyJ7hJThLZmckaMZBY83yrSD3BTevLN22cWphj9Sna7BW+7c5Pqw3W9i4YO4wSmwU\nJ1FPS2VF0Pz5ORDNp5fgz2JV
S4b3k2IQ0dEIXQW3OeBO1i7p+frUOroQFu8ZXLac\nromOggcaq3uWOek9yP+3XusUjXWJ3ZPPsA==\n-----END CERTIFICATE-----" + }, + "Config": { + "auto.offset.reset": "latest" + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } +} + diff --git a/examples/atlas-streams/stream-connection/README.md b/examples/atlas-streams/stream-connection/README.md new file mode 100644 index 000000000..b565726ec --- /dev/null +++ b/examples/atlas-streams/stream-connection/README.md @@ -0,0 +1,258 @@ +# MongoDB::Atlas::StreamConnection Examples + +This directory contains example CloudFormation templates for creating Stream Connections in MongoDB Atlas. + +## Prerequisites + +1. **Atlas Project**: You need an existing Atlas project. Get your Project ID from the Atlas UI or using: + ```bash + atlas projects list + ``` + +2. **Stream Workspace**: You need an existing Stream Workspace (formerly Stream Instance). Create one using: + ```bash + atlas streams instances create --projectId --region VIRGINIA_USA --provider AWS + ``` + +3. **Atlas Cluster** (for Cluster type connections): You need an existing Atlas cluster. Create one using: + ```bash + atlas clusters create --projectId --provider AWS --region US_EAST_1 --members 3 --tier M10 + ``` + +4. **AWS Credentials**: Ensure your AWS credentials are configured with permissions to: + - Create/update/delete CloudFormation stacks + - Access AWS Secrets Manager (for storing Atlas API keys) + +5. **Atlas API Keys**: Store your Atlas API keys in AWS Secrets Manager: + ```bash + aws secretsmanager create-secret \ + --name cfn/atlas/profile/default \ + --secret-string '{"PublicKey":"YOUR_PUBLIC_KEY","PrivateKey":"YOUR_PRIVATE_KEY"}' + ``` + +## Example Templates + +### 1. Cluster Stream Connection (`cluster-stream-connection.json`) + +Creates a connection of type `Cluster` that connects a Stream Workspace to an Atlas cluster. 
+
+**Parameters:**
+- `ProjectId`: Your Atlas project ID (24-hexadecimal characters)
+- `WorkspaceName`: Name of the existing Stream Workspace
+- `ConnectionName`: Name for the stream connection
+- `ClusterName`: Name of the existing Atlas cluster
+- `DbRole`: Database role name (e.g., "atlasAdmin", "readWriteAnyDatabase")
+- `DbRoleType`: Type of role - "BUILT_IN" or "CUSTOM"
+- `Profile`: AWS Secrets Manager profile name (default: "default")
+
+**Deploy:**
+```bash
+aws cloudformation deploy \
+  --template-file examples/atlas-streams/stream-connection/cluster-stream-connection.json \
+  --stack-name stream-connection-cluster \
+  --parameter-overrides \
+    ProjectId=YOUR_PROJECT_ID \
+    WorkspaceName=YOUR_WORKSPACE_NAME \
+    ConnectionName=my-cluster-connection \
+    ClusterName=YOUR_CLUSTER_NAME \
+    DbRole=atlasAdmin \
+    DbRoleType=BUILT_IN \
+  --capabilities CAPABILITY_IAM \
+  --region us-east-1
+```
+
+**Verify with Atlas CLI:**
+```bash
+# List all stream connections for the workspace
+atlas streams connections list --projectId <project-id>
+
+# Get specific connection details
+atlas streams connections get <connection-name> --projectId <project-id>
+```
+
+**Expected Output:**
+- Connection should appear in the list with Type: "Cluster"
+- Connection should show ClusterName matching your cluster
+- DbRoleToExecute should match the provided role
+
+### 2. Kafka Stream Connection (`kafka-stream-connection.json`)
+
+Creates a connection of type `Kafka` that connects a Stream Workspace to a Kafka cluster.
+ +**Parameters:** +- `ProjectId`: Your Atlas project ID +- `WorkspaceName`: Name of the existing Stream Workspace +- `ConnectionName`: Name for the stream connection +- `BootstrapServers`: Comma-separated list of Kafka broker addresses (e.g., "localhost:9092,localhost:9093") +- `AuthMechanism`: Authentication mechanism - "PLAIN", "SCRAM-256", or "SCRAM-512" +- `AuthUsername`: Kafka username +- `AuthPassword`: Kafka password (will be hidden in console) +- `SecurityProtocol`: "PLAINTEXT" or "SSL" +- `BrokerPublicCertificate`: X.509 certificate for SSL connections (required if SecurityProtocol is SSL) +- `Profile`: AWS Secrets Manager profile name (default: "default") + +**Deploy:** +```bash +aws cloudformation deploy \ + --template-file examples/atlas-streams/stream-connection/kafka-stream-connection.json \ + --stack-name stream-connection-kafka \ + --parameter-overrides \ + ProjectId=YOUR_PROJECT_ID \ + WorkspaceName=YOUR_WORKSPACE_NAME \ + ConnectionName=my-kafka-connection \ + BootstrapServers=localhost:9092,localhost:9093 \ + AuthMechanism=PLAIN \ + AuthUsername=kafka-user \ + AuthPassword=kafka-password \ + SecurityProtocol=PLAINTEXT \ + --capabilities CAPABILITY_IAM \ + --region us-east-1 +``` + +**Verify with Atlas CLI:** +```bash +# List all stream connections +atlas streams connections list --projectId + +# Get Kafka connection details +atlas streams connections get --projectId +``` + +**Expected Output:** +- Connection should appear with Type: "Kafka" +- BootstrapServers should match your Kafka cluster +- Authentication mechanism should match the provided value + +### 3. Sample Stream Connection (`sample-stream-connection.json`) + +Creates a connection of type `Sample` that uses a sample dataset (e.g., `sample_stream_solar`). 
+
+**Parameters:**
+- `ProjectId`: Your Atlas project ID
+- `WorkspaceName`: Name of the existing Stream Workspace
+- `ConnectionName`: Name of the sample dataset (default: "sample_stream_solar")
+- `Profile`: AWS Secrets Manager profile name (default: "default")
+
+**Deploy:**
+```bash
+aws cloudformation deploy \
+  --template-file examples/atlas-streams/stream-connection/sample-stream-connection.json \
+  --stack-name stream-connection-sample \
+  --parameter-overrides \
+    ProjectId=YOUR_PROJECT_ID \
+    WorkspaceName=YOUR_WORKSPACE_NAME \
+    ConnectionName=sample_stream_solar \
+  --capabilities CAPABILITY_IAM \
+  --region us-east-1
+```
+
+**Verify with Atlas CLI:**
+```bash
+# List all stream connections
+atlas streams connections list --projectId <project-id>
+
+# Get sample connection details
+atlas streams connections get sample_stream_solar --projectId <project-id>
+```
+
+**Expected Output:**
+- Connection should appear with Type: "Sample"
+- ConnectionName should be "sample_stream_solar"
+
+## Field Mapping: CFN Properties → Atlas API
+
+| CFN Property | Atlas API Field | Notes |
+|-------------|----------------|-------|
+| `ProjectId` | `groupId` | 24-hexadecimal character project ID |
+| `WorkspaceName` | `tenantName` | Stream workspace name (preferred over InstanceName) |
+| `InstanceName` | `tenantName` | Deprecated - use WorkspaceName instead |
+| `ConnectionName` | `name` | Unique connection name within workspace |
+| `Type` | `type` | Connection type: Cluster, Kafka, Sample, AWSLambda, Https |
+| `ClusterName` | `clusterName` | Required for Cluster type |
+| `DbRoleToExecute.Role` | `dbRoleToExecute.role` | Database role name |
+| `DbRoleToExecute.Type` | `dbRoleToExecute.type` | BUILT_IN or CUSTOM |
+| `BootstrapServers` | `bootstrapServers` | Required for Kafka type |
+| `Authentication.Mechanism` | `authentication.mechanism` | PLAIN, SCRAM-256, SCRAM-512, OAUTHBEARER |
+| `Security.Protocol` | `security.protocol` | PLAINTEXT or SSL |
+
+## Template Validation
+
+Before
deploying, validate the template syntax: + +```bash +# Validate cluster connection template +aws cloudformation validate-template \ + --template-body file://examples/atlas-streams/stream-connection/cluster-stream-connection.json + +# Validate Kafka connection template +aws cloudformation validate-template \ + --template-body file://examples/atlas-streams/stream-connection/kafka-stream-connection.json + +# Validate sample connection template +aws cloudformation validate-template \ + --template-body file://examples/atlas-streams/stream-connection/sample-stream-connection.json +``` + +If validation succeeds, the command will return JSON with template parameters and description. Any syntax errors will be reported. + +## Atlas CLI Validation + +After deploying a stack, verify the connection was created correctly: + +### 1. List All Connections +```bash +atlas streams connections list --projectId --output json +``` + +**Expected**: Your connection should appear in the list with: +- `name`: Matches your ConnectionName parameter +- `type`: Matches your connection type (Cluster, Kafka, or Sample) + +### 2. Get Connection Details +```bash +atlas streams connections get --projectId --output json +``` + +**For Cluster connections, verify:** +- `type` = "Cluster" +- `clusterName` = Your cluster name +- `dbRoleToExecute.role` = Your DbRole parameter +- `dbRoleToExecute.type` = Your DbRoleType parameter + +**For Kafka connections, verify:** +- `type` = "Kafka" +- `bootstrapServers` = Your BootstrapServers parameter +- `authentication.mechanism` = Your AuthMechanism parameter +- `security.protocol` = Your SecurityProtocol parameter + +**For Sample connections, verify:** +- `type` = "Sample" +- `name` = Your ConnectionName parameter (typically "sample_stream_solar") + +### 3. Verify in Atlas UI +1. Navigate to your Atlas project +2. Go to Stream Processing section +3. Select your Stream Workspace +4. View Connections tab +5. 
Verify the connection appears with correct configuration + +## Cleanup + +To delete a stream connection created via CloudFormation: + +```bash +# Delete the CloudFormation stack (recommended) +aws cloudformation delete-stack --stack-name <STACK_NAME> +aws cloudformation wait stack-delete-complete --stack-name <STACK_NAME> + +# Or delete directly using Atlas CLI +atlas streams connections delete <CONNECTION_NAME> --projectId <PROJECT_ID> --force +``` + +## Notes + +- **AWS-Only**: These templates are designed for AWS CloudFormation. Provider is implicitly AWS. +- **Backward Compatibility**: The resource supports both `WorkspaceName` (preferred) and `InstanceName` (deprecated). If both are provided, `WorkspaceName` takes precedence. CFN does not enforce mutual exclusivity. +- **Primary Identifier**: The resource is uniquely identified by: `ProjectId`, `ConnectionName`, `WorkspaceName`, and `Profile`. +- **Sensitive Fields**: Passwords and secrets should be managed through AWS Secrets Manager or CloudFormation parameters with `NoEcho: true`. +- **Required Resources**: Ensure Stream Workspace and (for Cluster connections) Atlas Cluster exist before deploying connection templates. 
diff --git a/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json b/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json new file mode 100644 index 000000000..fd7f3ec67 --- /dev/null +++ b/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json @@ -0,0 +1,77 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'AWSLambda' for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "AWSLambdaConnection" + }, + "LambdaRoleArn": { + "Type": "String", + "Description": "Amazon Resource Name (ARN) of the IAM role that Stream Processing will assume to access AWS Lambda. 
The role must have permissions to invoke Lambda functions.", + "AllowedPattern": "^arn:aws:iam::[0-9]{12}:role/[a-zA-Z0-9+=,.@_-]+$" + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "AWSLambda", + "Aws": { + "RoleArn": { + "Ref": "LambdaRoleArn" + } + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "AWSLambda" + }, + "LambdaRoleArn": { + "Description": "IAM Role ARN used for Lambda connection", + "Value": { + "Ref": "LambdaRoleArn" + } + } + } +} + diff --git a/examples/atlas-streams/stream-connection/cluster-stream-connection.json b/examples/atlas-streams/stream-connection/cluster-stream-connection.json index da3b5eec8..2672c227d 100644 --- a/examples/atlas-streams/stream-connection/cluster-stream-connection.json +++ b/examples/atlas-streams/stream-connection/cluster-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - "InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -50,8 +50,8 @@ "ConnectionName": { "Ref": "ConnectionName" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "ClusterName": { "Ref": "ClusterName" diff --git a/examples/atlas-streams/stream-connection/https-stream-connection.json b/examples/atlas-streams/stream-connection/https-stream-connection.json new file mode 100644 index 000000000..b0485a7ac 
--- /dev/null +++ b/examples/atlas-streams/stream-connection/https-stream-connection.json @@ -0,0 +1,79 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'Https' for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "HttpsConnection" + }, + "HttpsUrl": { + "Type": "String", + "Description": "URL endpoint for the HTTPS connection. Must be a valid HTTPS URL.", + "AllowedPattern": "^https://.*" + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "Https", + "Url": { + "Ref": "HttpsUrl" + }, + "Headers": { + "Content-Type": "application/json", + "Authorization": "Bearer updated-token-67891", + "X-API-Key": "my-new-api-key-123456" + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "Https" + }, + "HttpsUrl": { + "Description": "HTTPS endpoint URL", + "Value": { + "Ref": "HttpsUrl" + } + } + } +} diff --git a/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json 
b/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json new file mode 100644 index 000000000..df9fb0919 --- /dev/null +++ b/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json @@ -0,0 +1,164 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'Kafka' with OAuth authentication for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "KafkaOAuthConnection" + }, + "BootstrapServers": { + "Type": "String", + "Description": "Comma separated list of Kafka broker addresses (e.g., 'pkc-xxxxx.us-east-1.aws.confluent.cloud:9092')" + }, + "OAuthTokenEndpointUrl": { + "Type": "String", + "Description": "OAuth 2.0 token endpoint URL for authentication" + }, + "OAuthClientId": { + "Type": "String", + "Description": "OAuth 2.0 client identifier" + }, + "OAuthClientSecret": { + "Type": "String", + "Description": "OAuth 2.0 client secret. 
Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials.", + "NoEcho": true + }, + "OAuthScope": { + "Type": "String", + "Description": "OAuth 2.0 scope (e.g., 'read:messages write:messages')", + "Default": "" + }, + "SaslOauthbearerExtensions": { + "Type": "String", + "Description": "SASL OAUTHBEARER extensions (e.g., 'logicalCluster=lkc-kmom,identityPoolId=pool-lAr')", + "Default": "" + }, + "SecurityProtocol": { + "Type": "String", + "Description": "Describes the transport type. For OAuth, typically SASL_SSL.", + "Default": "SASL_SSL", + "AllowedValues": [ + "PLAINTEXT", + "SSL", + "SASL_PLAINTEXT", + "SASL_SSL" + ] + }, + "BrokerPublicCertificate": { + "Type": "String", + "Description": "A trusted, public x509 certificate for connecting to Kafka over SSL. Required if SecurityProtocol is SSL or SASL_SSL.", + "Default": "" + }, + "ConfigAutoOffsetReset": { + "Type": "String", + "Description": "Kafka consumer configuration for auto.offset.reset", + "Default": "earliest", + "AllowedValues": [ + "earliest", + "latest", + "none" + ] + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "Kafka", + "BootstrapServers": { + "Ref": "BootstrapServers" + }, + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": { + "Ref": "OAuthTokenEndpointUrl" + }, + "ClientId": { + "Ref": "OAuthClientId" + }, + "ClientSecret": { + "Ref": "OAuthClientSecret" + }, + "Scope": { + "Ref": "OAuthScope" + }, + "SaslOauthbearerExtensions": { + "Ref": "SaslOauthbearerExtensions" + } + }, + "Security": { + "Protocol": { + "Ref": "SecurityProtocol" + }, + 
"BrokerPublicCertificate": { + "Ref": "BrokerPublicCertificate" + } + }, + "Config": { + "auto.offset.reset": { + "Ref": "ConfigAutoOffsetReset" + } + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "Kafka" + }, + "AuthenticationMechanism": { + "Description": "Kafka authentication mechanism", + "Value": "OAUTHBEARER" + }, + "BootstrapServers": { + "Description": "Kafka bootstrap servers", + "Value": { + "Ref": "BootstrapServers" + } + } + } +} + diff --git a/examples/atlas-streams/stream-connection/kafka-stream-connection.json b/examples/atlas-streams/stream-connection/kafka-stream-connection.json index 4b8fe0b8c..2040e2f1d 100644 --- a/examples/atlas-streams/stream-connection/kafka-stream-connection.json +++ b/examples/atlas-streams/stream-connection/kafka-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - "InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -59,8 +59,8 @@ "ConnectionName": { "Ref": "ConnectionName" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "Type": "Kafka", "Authentication": { diff --git a/examples/atlas-streams/stream-connection/sample-stream-connection.json b/examples/atlas-streams/stream-connection/sample-stream-connection.json index aef9fcc4e..d03486c8c 100644 --- a/examples/atlas-streams/stream-connection/sample-stream-connection.json +++ b/examples/atlas-streams/stream-connection/sample-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - 
"InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -32,8 +32,8 @@ "Profile": { "Ref": "Profile" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "Type": "Sample", "ConnectionName": { From b29e4732abd796e0339c9695d6a7f5d33a0846ce Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Tue, 13 Jan 2026 21:24:34 +0530 Subject: [PATCH 02/10] chore: Update Atlas SDK to v20250312012 for stream-connection resource --- cfn-resources/stream-connection/cmd/resource/mappings.go | 2 +- cfn-resources/stream-connection/cmd/resource/mappings_test.go | 2 +- cfn-resources/stream-connection/cmd/resource/resource.go | 2 +- cfn-resources/stream-connection/cmd/resource/resource_test.go | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cfn-resources/stream-connection/cmd/resource/mappings.go b/cfn-resources/stream-connection/cmd/resource/mappings.go index ab34c085a..961acd126 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings.go @@ -15,7 +15,7 @@ package resource import ( - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" ) diff --git a/cfn-resources/stream-connection/cmd/resource/mappings_test.go b/cfn-resources/stream-connection/cmd/resource/mappings_test.go index f59a3ec42..d34b22ea0 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings_test.go @@ -17,7 +17,7 @@ package resource_test import ( "testing" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws/smithy-go/ptr" 
"github.com/mongodb/mongodbatlas-cloudformation-resources/stream-connection/cmd/resource" diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index 360fd07c2..e1e25e301 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -19,7 +19,7 @@ import ( "fmt" "net/http" - "go.mongodb.org/atlas-sdk/v20250312010/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" diff --git a/cfn-resources/stream-connection/cmd/resource/resource_test.go b/cfn-resources/stream-connection/cmd/resource/resource_test.go index 026c6d554..04419a86b 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource_test.go +++ b/cfn-resources/stream-connection/cmd/resource/resource_test.go @@ -27,8 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312010/admin" - "go.mongodb.org/atlas-sdk/v20250312010/mockadmin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" + "go.mongodb.org/atlas-sdk/v20250312012/mockadmin" ) const ( From 1706c15bc5f889764f9e5c48573b98e21dbf0d29 Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Wed, 14 Jan 2026 11:12:43 +0530 Subject: [PATCH 03/10] feat: automate IAM role creation and cleanup for AWS Lambda stream connection tests; remove resource unit tests --- .../cmd/resource/resource_test.go | 392 ------------------ .../test/cfn-test-create-inputs.sh | 97 ++++- .../test/cfn-test-delete-inputs.sh | 64 +++ .../test/lambda-permissions-template.json | 14 + .../test/lambda-role-policy-template.json | 18 + 5 files changed, 190 insertions(+), 395 deletions(-) delete mode 100644 cfn-resources/stream-connection/cmd/resource/resource_test.go create mode 100644 
cfn-resources/stream-connection/test/lambda-permissions-template.json create mode 100644 cfn-resources/stream-connection/test/lambda-role-policy-template.json diff --git a/cfn-resources/stream-connection/cmd/resource/resource_test.go b/cfn-resources/stream-connection/cmd/resource/resource_test.go deleted file mode 100644 index 04419a86b..000000000 --- a/cfn-resources/stream-connection/cmd/resource/resource_test.go +++ /dev/null @@ -1,392 +0,0 @@ -// Copyright 2026 MongoDB Inc -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package resource_test - -import ( - "fmt" - "net/http" - "testing" - - "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" - "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" - "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-connection/cmd/resource" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util" - "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - "go.mongodb.org/atlas-sdk/v20250312012/admin" - "go.mongodb.org/atlas-sdk/v20250312012/mockadmin" -) - -const ( - testProjectID = "507f1f77bcf86cd799439011" - testConnectionName = "test-connection" - testWorkspaceName = "test-workspace" - testProfile = "default" - testClusterName = "test-cluster" - testRole = "atlasAdmin" - testRoleType = "BUILT_IN" - msgRequired = "required" - msgWorkspaceRequired = "Either WorkspaceName or InstanceName must be provided" -) - -func createTestClusterConnectionModel() *resource.Model { - return &resource.Model{ - Profile: util.StringPtr(testProfile), - ProjectId: util.StringPtr(testProjectID), - ConnectionName: util.StringPtr(testConnectionName), - WorkspaceName: util.StringPtr(testWorkspaceName), - Type: util.StringPtr(resource.ClusterConnectionType), - ClusterName: util.StringPtr(testClusterName), - DbRoleToExecute: &resource.DBRoleToExecute{ - Role: util.StringPtr(testRole), - Type: util.StringPtr(testRoleType), - }, - } -} - -func createTestStreamConnectionResponse(connType string) *admin.StreamsConnection { - name := testConnectionName - response := &admin.StreamsConnection{ - Name: &name, - Type: &connType, - } - if connType == resource.ClusterConnectionType { - response.ClusterName = util.StringPtr(testClusterName) - response.DbRoleToExecute = &admin.DBRoleToExecute{ - Role: admin.PtrString(testRole), - Type: admin.PtrString(testRoleType), - } - } - return response -} - -func 
TestConstants(t *testing.T) { - assert.Equal(t, "Cluster", resource.ClusterConnectionType) - assert.Equal(t, "Kafka", resource.KafkaConnectionType) - assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName, constants.Type}, resource.CreateRequiredFields) - assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName}, resource.ReadRequiredFields) - assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName, constants.Type}, resource.UpdateRequiredFields) - assert.Equal(t, []string{constants.ProjectID, constants.ConnectionName}, resource.DeleteRequiredFields) - assert.Equal(t, []string{constants.ProjectID}, resource.ListRequiredFields) -} - -func TestValidationErrors(t *testing.T) { - testCases := map[string]struct { - operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) - currentModel *resource.Model - expectedMsg string - }{ - "Create_missingProjectId": { - operation: resource.Create, - currentModel: &resource.Model{ - Profile: util.StringPtr(testProfile), ConnectionName: util.StringPtr(testConnectionName), - WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), - }, - expectedMsg: msgRequired, - }, - "Create_missingConnectionName": { - operation: resource.Create, - currentModel: &resource.Model{ - Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), - WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), - }, - expectedMsg: msgRequired, - }, - "Create_missingWorkspaceOrInstanceName": { - operation: resource.Create, - currentModel: &resource.Model{ - Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), - ConnectionName: util.StringPtr(testConnectionName), Type: util.StringPtr(resource.ClusterConnectionType), - }, - expectedMsg: msgWorkspaceRequired, - }, - "Read_missingProjectId": { - operation: resource.Read, - currentModel: 
&resource.Model{ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName)}, - expectedMsg: msgRequired, - }, - "Read_missingWorkspaceOrInstanceName": { - operation: resource.Read, - currentModel: &resource.Model{ProjectId: util.StringPtr(testProjectID), ConnectionName: util.StringPtr(testConnectionName)}, - expectedMsg: msgWorkspaceRequired, - }, - "Update_missingProjectId": { - operation: resource.Update, - currentModel: &resource.Model{ - Profile: util.StringPtr(testProfile), ConnectionName: util.StringPtr(testConnectionName), - WorkspaceName: util.StringPtr(testWorkspaceName), Type: util.StringPtr(resource.ClusterConnectionType), - }, - expectedMsg: msgRequired, - }, - "Delete_missingProjectId": { - operation: resource.Delete, - currentModel: &resource.Model{ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName)}, - expectedMsg: msgRequired, - }, - "List_missingProjectId": { - operation: resource.List, - currentModel: &resource.Model{WorkspaceName: util.StringPtr(testWorkspaceName)}, - expectedMsg: msgRequired, - }, - "List_missingWorkspaceOrInstanceName": { - operation: resource.List, - currentModel: &resource.Model{ProjectId: util.StringPtr(testProjectID)}, - expectedMsg: msgWorkspaceRequired, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - needsMock := name == "Create_missingWorkspaceOrInstanceName" || - name == "Read_missingWorkspaceOrInstanceName" || - name == "List_missingWorkspaceOrInstanceName" - if needsMock { - cleanup := setupMockClient(t, func(*mockadmin.StreamsApi) {}) - defer cleanup() - } - event, err := tc.operation(handler.Request{}, nil, tc.currentModel) - require.NoError(t, err) - assert.Equal(t, handler.Failed, event.OperationStatus) - assert.Contains(t, event.Message, tc.expectedMsg) - }) - } -} - -func setupMockClient(t *testing.T, mockSetup func(*mockadmin.StreamsApi)) func() { - t.Helper() - originalInitEnv := 
resource.InitEnvWithLatestClient - mockStreamsAPI := mockadmin.NewStreamsApi(t) - mockSetup(mockStreamsAPI) - mockClient := &admin.APIClient{StreamsApi: mockStreamsAPI} - resource.InitEnvWithLatestClient = func(req handler.Request, currentModel *resource.Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { - return mockClient, nil - } - return func() { resource.InitEnvWithLatestClient = originalInitEnv } -} - -func TestCRUDOperations(t *testing.T) { - testCases := map[string]struct { - operation func(handler.Request, *resource.Model, *resource.Model) (handler.ProgressEvent, error) - model *resource.Model - mockSetup func(*mockadmin.StreamsApi) - validateResult func(t *testing.T, event handler.ProgressEvent) - expectedStatus handler.Status - }{ - "Create_success": { - operation: resource.Create, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - response := createTestStreamConnectionResponse(resource.ClusterConnectionType) - m.EXPECT().CreateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().CreateStreamConnectionExecute(mock.Anything). 
- Return(response, &http.Response{StatusCode: 201}, nil) - }, - expectedStatus: handler.Success, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, "Create Completed", event.Message) - assert.NotNil(t, event.ResourceModel) - }, - }, - "Create_kafka": { - operation: resource.Create, - model: &resource.Model{ - Profile: util.StringPtr(testProfile), ProjectId: util.StringPtr(testProjectID), - ConnectionName: util.StringPtr(testConnectionName), WorkspaceName: util.StringPtr(testWorkspaceName), - Type: util.StringPtr(resource.KafkaConnectionType), BootstrapServers: util.StringPtr("broker1:9092"), - Security: &resource.StreamsKafkaSecurity{Protocol: util.StringPtr("SSL")}, - Authentication: &resource.StreamsKafkaAuthentication{Mechanism: util.StringPtr("PLAIN")}, - }, - mockSetup: func(m *mockadmin.StreamsApi) { - bootstrap := "broker1:9092" - response := &admin.StreamsConnection{ - Name: util.StringPtr(testConnectionName), Type: util.StringPtr(resource.KafkaConnectionType), - BootstrapServers: &bootstrap, - } - m.EXPECT().CreateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().CreateStreamConnectionExecute(mock.Anything). - Return(response, &http.Response{StatusCode: 201}, nil) - }, - expectedStatus: handler.Success, - }, - "Create_apiError": { - operation: resource.Create, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().CreateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.CreateStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().CreateStreamConnectionExecute(mock.Anything). 
- Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) - }, - expectedStatus: handler.Failed, - }, - "Read_success": { - operation: resource.Read, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - response := createTestStreamConnectionResponse(resource.ClusterConnectionType) - m.EXPECT().GetStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.GetStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().GetStreamConnectionExecute(mock.Anything). - Return(response, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Read_notFound": { - operation: resource.Read, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().GetStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.GetStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().GetStreamConnectionExecute(mock.Anything). - Return(nil, &http.Response{StatusCode: 404}, fmt.Errorf("not found")) - }, - expectedStatus: handler.Failed, - }, - "Update_success": { - operation: resource.Update, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - response := createTestStreamConnectionResponse(resource.ClusterConnectionType) - m.EXPECT().UpdateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.UpdateStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().UpdateStreamConnectionExecute(mock.Anything). - Return(response, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - }, - "Update_apiError": { - operation: resource.Update, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().UpdateStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
- Return(admin.UpdateStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().UpdateStreamConnectionExecute(mock.Anything). - Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) - }, - expectedStatus: handler.Failed, - }, - "Delete_success": { - operation: resource.Delete, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().DeleteStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.DeleteStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().DeleteStreamConnectionExecute(mock.Anything). - Return(&http.Response{StatusCode: 204}, nil) - }, - expectedStatus: handler.Success, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, "Delete Completed", event.Message) - assert.Nil(t, event.ResourceModel) - }, - }, - "Delete_notFound": { - operation: resource.Delete, - model: createTestClusterConnectionModel(), - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().DeleteStreamConnection(mock.Anything, mock.Anything, mock.Anything, mock.Anything). - Return(admin.DeleteStreamConnectionApiRequest{ApiService: m}) - m.EXPECT().DeleteStreamConnectionExecute(mock.Anything). 
- Return(&http.Response{StatusCode: 404}, fmt.Errorf("not found")) - }, - expectedStatus: handler.Failed, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Equal(t, string(types.HandlerErrorCodeNotFound), event.HandlerErrorCode) - }, - }, - "List_success": { - operation: resource.List, - model: &resource.Model{ - ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), - Profile: util.StringPtr(testProfile), - }, - mockSetup: func(m *mockadmin.StreamsApi) { - connections := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} - totalCount := 1 - response := &admin.PaginatedApiStreamsConnection{ - Results: &connections, TotalCount: &totalCount, - } - m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). - Return(admin.ListStreamConnectionsApiRequest{ApiService: m}) - m.EXPECT().ListStreamConnectionsExecute(mock.Anything). - Return(response, &http.Response{StatusCode: 200}, nil) - }, - expectedStatus: handler.Success, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.Len(t, event.ResourceModels, 1) - }, - }, - "List_withPagination": { - operation: resource.List, - model: &resource.Model{ - ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), - Profile: util.StringPtr(testProfile), - }, - mockSetup: func(m *mockadmin.StreamsApi) { - conn1 := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} - conn2 := []admin.StreamsConnection{*createTestStreamConnectionResponse(resource.ClusterConnectionType)} - totalCount := 2 - m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). - Return(admin.ListStreamConnectionsApiRequest{ApiService: m}).Times(2) - m.EXPECT().ListStreamConnectionsExecute(mock.Anything). 
- Return(&admin.PaginatedApiStreamsConnection{Results: &conn1, TotalCount: &totalCount}, - &http.Response{StatusCode: 200}, nil).Once() - m.EXPECT().ListStreamConnectionsExecute(mock.Anything). - Return(&admin.PaginatedApiStreamsConnection{Results: &conn2, TotalCount: &totalCount}, - &http.Response{StatusCode: 200}, nil).Once() - }, - expectedStatus: handler.Success, - validateResult: func(t *testing.T, event handler.ProgressEvent) { - t.Helper() - assert.GreaterOrEqual(t, len(event.ResourceModels), 2) - }, - }, - "List_apiError": { - operation: resource.List, - model: &resource.Model{ - ProjectId: util.StringPtr(testProjectID), WorkspaceName: util.StringPtr(testWorkspaceName), - Profile: util.StringPtr(testProfile), - }, - mockSetup: func(m *mockadmin.StreamsApi) { - m.EXPECT().ListStreamConnectionsWithParams(mock.Anything, mock.Anything). - Return(admin.ListStreamConnectionsApiRequest{ApiService: m}) - m.EXPECT().ListStreamConnectionsExecute(mock.Anything). - Return(nil, &http.Response{StatusCode: 500}, fmt.Errorf("internal server error")) - }, - expectedStatus: handler.Failed, - }, - } - - for name, tc := range testCases { - t.Run(name, func(t *testing.T) { - cleanup := setupMockClient(t, tc.mockSetup) - defer cleanup() - event, err := tc.operation(handler.Request{}, nil, tc.model) - require.NoError(t, err) - assert.Equal(t, tc.expectedStatus, event.OperationStatus) - if tc.validateResult != nil { - tc.validateResult(t, event) - } - }) - } -} diff --git a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh index 7a3451421..39329c4aa 100755 --- a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh @@ -46,6 +46,94 @@ atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --pro atlas clusters watch "${clusterName}" --projectId "${projectId}" echo -e "Created Cluster \"${clusterName}\"" +# AWS 
IAM role creation and authorization for Lambda connections +echo "--------------------------------AWS Lambda IAM Role creation starts ----------------------------" + +# Role names for CREATE and UPDATE scenarios +iamRoleNameCreate="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}" +iamRoleNameUpdate="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}-updated" +policyName="atlas-lambda-invoke-policy" + +echo "Creating IAM roles: ${iamRoleNameCreate} and ${iamRoleNameUpdate}" + +# Create first cloud provider access entry (for CREATE role) +roleIdCreate=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output json | jq -r '.roleId') +echo "Created Atlas cloud provider access entry for CREATE role: ${roleIdCreate}" + +# Create second cloud provider access entry (for UPDATE role) +roleIdUpdate=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output json | jq -r '.roleId') +echo "Created Atlas cloud provider access entry for UPDATE role: ${roleIdUpdate}" + +# Get Atlas AWS Account ARN and External ID for CREATE role +atlasAWSAccountArnCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdCreate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAWSAccountArn') +atlasAssumedRoleExternalIdCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdCreate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') + +# Get Atlas AWS Account ARN and External ID for UPDATE role +atlasAWSAccountArnUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdUpdate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAWSAccountArn') +atlasAssumedRoleExternalIdUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdUpdate}" -r '.awsIamRoles[] | 
select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') + +# Create trust policy for CREATE role +jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalIdCreate" \ + --arg atlasAWSAccountArn "$atlasAWSAccountArnCreate" \ + '.Statement[0].Principal.AWS?|=$atlasAWSAccountArn | .Statement[0].Condition.StringEquals["sts:ExternalId"]?|=$atlasAssumedRoleExternalId' \ + "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy-create.json" + +# Create trust policy for UPDATE role +jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalIdUpdate" \ + --arg atlasAWSAccountArn "$atlasAWSAccountArnUpdate" \ + '.Statement[0].Principal.AWS?|=$atlasAWSAccountArn | .Statement[0].Condition.StringEquals["sts:ExternalId"]?|=$atlasAssumedRoleExternalId' \ + "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy-update.json" + +echo "--------------------------------AWS IAM Role creation starts ----------------------------" + +# Check if CREATE role exists, delete if found +awsRoleIdCreate=$(aws iam get-role --role-name "${iamRoleNameCreate}" 2>/dev/null | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") +if [ -n "$awsRoleIdCreate" ]; then + aws iam delete-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" 2>/dev/null || true + aws iam delete-role --role-name "${iamRoleNameCreate}" + echo "Deleted existing CREATE role" +fi + +# Create CREATE role +awsRoleIdCreate=$(aws iam create-role --role-name "${iamRoleNameCreate}" --assume-role-policy-document file://"$(dirname "$0")"/lambda-trust-policy-create.json | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .RoleId') +echo "Created AWS IAM role for CREATE: ${awsRoleIdCreate}" + +# Check if UPDATE role exists, delete if found +awsRoleIdUpdate=$(aws iam get-role --role-name "${iamRoleNameUpdate}" 2>/dev/null | jq --arg roleName 
"${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") +if [ -n "$awsRoleIdUpdate" ]; then + aws iam delete-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" 2>/dev/null || true + aws iam delete-role --role-name "${iamRoleNameUpdate}" + echo "Deleted existing UPDATE role" +fi + +# Create UPDATE role +awsRoleIdUpdate=$(aws iam create-role --role-name "${iamRoleNameUpdate}" --assume-role-policy-document file://"$(dirname "$0")"/lambda-trust-policy-update.json | jq --arg roleName "${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .RoleId') +echo "Created AWS IAM role for UPDATE: ${awsRoleIdUpdate}" + +# Get role ARNs +awsArnCreate=$(aws iam get-role --role-name "${iamRoleNameCreate}" | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .Arn') +awsArnUpdate=$(aws iam get-role --role-name "${iamRoleNameUpdate}" | jq --arg roleName "${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .Arn') + +# Attach Lambda permissions to both roles +aws iam put-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" --policy-document file://"$(dirname "$0")"/lambda-permissions-template.json +aws iam put-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" --policy-document file://"$(dirname "$0")"/lambda-permissions-template.json +echo "Attached Lambda invoke permissions to both roles" + +echo "--------------------------------AWS IAM Role creation ends ----------------------------" + +# Wait for AWS IAM role to propagate (similar to encryption-at-rest pattern) +echo "Waiting for IAM roles to propagate..." 
+sleep 65 + +# Authorize the roles in Atlas +echo "--------------------------------Authorize MongoDB Atlas Roles starts ----------------------------" +atlas cloudProviders accessRoles aws authorize "${roleIdCreate}" --iamAssumedRoleArn "${awsArnCreate}" --projectId "${projectId}" +echo "Authorized CREATE role: ${iamRoleNameCreate}" + +atlas cloudProviders accessRoles aws authorize "${roleIdUpdate}" --iamAssumedRoleArn "${awsArnUpdate}" --projectId "${projectId}" +echo "Authorized UPDATE role: ${iamRoleNameUpdate}" +echo "--------------------------------Authorize MongoDB Atlas Roles ends ----------------------------" + jq --arg cluster_name "$clusterName" \ --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ @@ -99,17 +187,21 @@ jq --arg workspace_name "$workspaceName" \ jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ + --arg role_arn "$awsArnCreate" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .WorkspaceName?|=$workspace_name' \ + | .WorkspaceName?|=$workspace_name + | .Aws.RoleArn=$role_arn' \ "$(dirname "$0")/inputs_4_create.json" >"inputs/inputs_4_create.json" jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ + --arg role_arn "$awsArnUpdate" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .WorkspaceName?|=$workspace_name' \ + | .WorkspaceName?|=$workspace_name + | .Aws.RoleArn=$role_arn' \ "$(dirname "$0")/inputs_4_update.json" >"inputs/inputs_4_update.json" jq --arg workspace_name "$workspaceName" \ @@ -143,4 +235,3 @@ jq --arg workspace_name "$workspaceName" \ | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_6_update.json" >"inputs/inputs_6_update.json" - \ No newline at end of file diff --git a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh index 91289aef8..247354ab2 100755 --- 
a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh @@ -45,6 +45,70 @@ else echo "failed to delete the stream workspace/instance with name ${workspaceOrInstanceName}" fi +# Delete AWS Lambda IAM roles if they exist +echo "--------------------------------delete AWS Lambda IAM roles starts ----------------------------" + +# Check if Lambda input files exist +if [ -f "./inputs/inputs_4_create.json" ]; then + echo "Found Lambda connection inputs, cleaning up IAM roles..." + + policyName="atlas-lambda-invoke-policy" + + # Extract role ARN from CREATE input file + roleArnCreate=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_create.json) + # Extract role name from ARN (everything after the last '/') + iamRoleNameCreate=$(echo "${roleArnCreate}" | awk -F'/' '{print $NF}') + + # Extract role ARN from UPDATE input file + roleArnUpdate=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_update.json) + # Extract role name from ARN (everything after the last '/') + iamRoleNameUpdate=$(echo "${roleArnUpdate}" | awk -F'/' '{print $NF}') + + # Get external IDs from trust policy files and find roleIds in Atlas + if [ -f "$(dirname "$0")/lambda-trust-policy-create.json" ]; then + atlasAssumedRoleExternalIdCreate=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy-create.json") + roleIdCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalIdCreate}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') + + if [ -n "${roleIdCreate}" ] && [ "${roleIdCreate}" != "null" ]; then + echo "Deauthorizing CREATE role from Atlas: ${roleIdCreate}" + atlas cloudProviders accessRoles aws deauthorize "${roleIdCreate}" --projectId "${projectId}" --force || echo "Failed to deauthorize CREATE role" + fi + fi + + if [ -f "$(dirname 
"$0")/lambda-trust-policy-update.json" ]; then + atlasAssumedRoleExternalIdUpdate=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy-update.json") + roleIdUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalIdUpdate}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') + + if [ -n "${roleIdUpdate}" ] && [ "${roleIdUpdate}" != "null" ]; then + echo "Deauthorizing UPDATE role from Atlas: ${roleIdUpdate}" + atlas cloudProviders accessRoles aws deauthorize "${roleIdUpdate}" --projectId "${projectId}" --force || echo "Failed to deauthorize UPDATE role" + fi + fi + + # Delete CREATE IAM role + if [ -n "${iamRoleNameCreate}" ] && [ "${iamRoleNameCreate}" != "null" ] && [ "${iamRoleNameCreate}" != "" ]; then + echo "Deleting CREATE IAM role: ${iamRoleNameCreate}" + aws iam delete-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" 2>/dev/null || echo "Policy already deleted or doesn't exist" + aws iam delete-role --role-name "${iamRoleNameCreate}" 2>/dev/null || echo "Role already deleted or doesn't exist" + fi + + # Delete UPDATE IAM role + if [ -n "${iamRoleNameUpdate}" ] && [ "${iamRoleNameUpdate}" != "null" ] && [ "${iamRoleNameUpdate}" != "" ]; then + echo "Deleting UPDATE IAM role: ${iamRoleNameUpdate}" + aws iam delete-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" 2>/dev/null || echo "Policy already deleted or doesn't exist" + aws iam delete-role --role-name "${iamRoleNameUpdate}" 2>/dev/null || echo "Role already deleted or doesn't exist" + fi + + # Clean up temporary files + rm -f "$(dirname "$0")/lambda-trust-policy-create.json" + rm -f "$(dirname "$0")/lambda-trust-policy-update.json" + + echo "Cleaned up Lambda IAM roles and temporary files" +else + echo "No Lambda connection inputs found, skipping IAM role cleanup" +fi +echo 
"--------------------------------delete AWS Lambda IAM roles ends ----------------------------" + #delete project if atlas projects delete "$projectId" --force; then echo "$projectId project deletion OK" diff --git a/cfn-resources/stream-connection/test/lambda-permissions-template.json b/cfn-resources/stream-connection/test/lambda-permissions-template.json new file mode 100644 index 000000000..fdeef7e80 --- /dev/null +++ b/cfn-resources/stream-connection/test/lambda-permissions-template.json @@ -0,0 +1,14 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:InvokeFunction", + "lambda:InvokeAsync" + ], + "Resource": "*" + } + ] +} + diff --git a/cfn-resources/stream-connection/test/lambda-role-policy-template.json b/cfn-resources/stream-connection/test/lambda-role-policy-template.json new file mode 100644 index 000000000..e8209f2b6 --- /dev/null +++ b/cfn-resources/stream-connection/test/lambda-role-policy-template.json @@ -0,0 +1,18 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "" + } + } + } + ] +} + From db5e7f0b9e6fb883cb8ba9f55113e92be3ad5a26 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Thu, 15 Jan 2026 15:51:16 -0500 Subject: [PATCH 04/10] CLOUDP-369806-stream-connection addressing review comments --- cfn-resources/stream-connection/cmd/resource/resource.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index e1e25e301..81b1c73c1 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -222,9 +222,13 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P for i := range accumulatedStreamConns { model := 
GetStreamConnectionModel(&accumulatedStreamConns[i], nil) model.ProjectId = currentModel.ProjectId + // Set both WorkspaceName and InstanceName for consistency and backward compatibility + // InstanceName is deprecated but we maintain it for backward compatibility if currentModel.WorkspaceName != nil { model.WorkspaceName = currentModel.WorkspaceName - } else { + model.InstanceName = currentModel.WorkspaceName + } else if currentModel.InstanceName != nil { + model.WorkspaceName = currentModel.InstanceName model.InstanceName = currentModel.InstanceName } model.Profile = currentModel.Profile From 0f389c985bace249bfaf2d87082696a3c2088c47 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Thu, 15 Jan 2026 17:02:27 -0500 Subject: [PATCH 05/10] CLOUDP-369806-stream-connection Update readme in example --- .../atlas-streams/stream-connection/README.md | 297 +++--------------- 1 file changed, 39 insertions(+), 258 deletions(-) diff --git a/examples/atlas-streams/stream-connection/README.md b/examples/atlas-streams/stream-connection/README.md index b565726ec..c06455f46 100644 --- a/examples/atlas-streams/stream-connection/README.md +++ b/examples/atlas-streams/stream-connection/README.md @@ -1,258 +1,39 @@ -# MongoDB::Atlas::StreamConnection Examples - -This directory contains example CloudFormation templates for creating Stream Connections in MongoDB Atlas. - -## Prerequisites - -1. **Atlas Project**: You need an existing Atlas project. Get your Project ID from the Atlas UI or using: - ```bash - atlas projects list - ``` - -2. **Stream Workspace**: You need an existing Stream Workspace (formerly Stream Instance). Create one using: - ```bash - atlas streams instances create --projectId --region VIRGINIA_USA --provider AWS - ``` - -3. **Atlas Cluster** (for Cluster type connections): You need an existing Atlas cluster. Create one using: - ```bash - atlas clusters create --projectId --provider AWS --region US_EAST_1 --members 3 --tier M10 - ``` - -4. 
**AWS Credentials**: Ensure your AWS credentials are configured with permissions to: - - Create/update/delete CloudFormation stacks - - Access AWS Secrets Manager (for storing Atlas API keys) - -5. **Atlas API Keys**: Store your Atlas API keys in AWS Secrets Manager: - ```bash - aws secretsmanager create-secret \ - --name cfn/atlas/profile/default \ - --secret-string '{"PublicKey":"YOUR_PUBLIC_KEY","PrivateKey":"YOUR_PRIVATE_KEY"}' - ``` - -## Example Templates - -### 1. Cluster Stream Connection (`cluster-stream-connection.json`) - -Creates a connection of type `Cluster` that connects a Stream Workspace to an Atlas cluster. - -**Parameters:** -- `ProjectId`: Your Atlas project ID (24-hexadecimal characters) -- `WorkspaceName`: Name of the existing Stream Workspace -- `ConnectionName`: Name for the stream connection -- `ClusterName`: Name of the existing Atlas cluster -- `DbRole`: Database role name (e.g., "atlasAdmin", "readWriteAnyDatabase") -- `DbRoleType`: Type of role - "BUILT_IN" or "CUSTOM" -- `Profile`: AWS Secrets Manager profile name (default: "default") - -**Deploy:** -```bash -aws cloudformation deploy \ - --template-file examples/atlas-streams/stream-connection/cluster-stream-connection.json \ - --stack-name stream-connection-cluster \ - --parameter-overrides \ - ProjectId=YOUR_PROJECT_ID \ - WorkspaceName=YOUR_WORKSPACE_NAME \ - ConnectionName=my-cluster-connection \ - ClusterName=YOUR_CLUSTER_NAME \ - DbRole=atlasAdmin \ - DbRoleType=BUILT_IN \ - --capabilities CAPABILITY_IAM \ - --region us-east-1 -``` - -**Verify with Atlas CLI:** -```bash -# List all stream connections for the workspace -atlas streams connections list --projectId - -# Get specific connection details -atlas streams connections get --projectId -``` - -**Expected Output:** -- Connection should appear in the list with Type: "Cluster" -- Connection should show ClusterName matching your cluster -- DbRoleToExecute should match the provided role - -### 2. 
Kafka Stream Connection (`kafka-stream-connection.json`) - -Creates a connection of type `Kafka` that connects a Stream Workspace to a Kafka cluster. - -**Parameters:** -- `ProjectId`: Your Atlas project ID -- `WorkspaceName`: Name of the existing Stream Workspace -- `ConnectionName`: Name for the stream connection -- `BootstrapServers`: Comma-separated list of Kafka broker addresses (e.g., "localhost:9092,localhost:9093") -- `AuthMechanism`: Authentication mechanism - "PLAIN", "SCRAM-256", or "SCRAM-512" -- `AuthUsername`: Kafka username -- `AuthPassword`: Kafka password (will be hidden in console) -- `SecurityProtocol`: "PLAINTEXT" or "SSL" -- `BrokerPublicCertificate`: X.509 certificate for SSL connections (required if SecurityProtocol is SSL) -- `Profile`: AWS Secrets Manager profile name (default: "default") - -**Deploy:** -```bash -aws cloudformation deploy \ - --template-file examples/atlas-streams/stream-connection/kafka-stream-connection.json \ - --stack-name stream-connection-kafka \ - --parameter-overrides \ - ProjectId=YOUR_PROJECT_ID \ - WorkspaceName=YOUR_WORKSPACE_NAME \ - ConnectionName=my-kafka-connection \ - BootstrapServers=localhost:9092,localhost:9093 \ - AuthMechanism=PLAIN \ - AuthUsername=kafka-user \ - AuthPassword=kafka-password \ - SecurityProtocol=PLAINTEXT \ - --capabilities CAPABILITY_IAM \ - --region us-east-1 -``` - -**Verify with Atlas CLI:** -```bash -# List all stream connections -atlas streams connections list --projectId - -# Get Kafka connection details -atlas streams connections get --projectId -``` - -**Expected Output:** -- Connection should appear with Type: "Kafka" -- BootstrapServers should match your Kafka cluster -- Authentication mechanism should match the provided value - -### 3. Sample Stream Connection (`sample-stream-connection.json`) - -Creates a connection of type `Sample` that uses a sample dataset (e.g., `sample_stream_solar`). 
- -**Parameters:** -- `ProjectId`: Your Atlas project ID -- `WorkspaceName`: Name of the existing Stream Workspace -- `ConnectionName`: Name of the sample dataset (default: "sample_stream_solar") -- `Profile`: AWS Secrets Manager profile name (default: "default") - -**Deploy:** -```bash -aws cloudformation deploy \ - --template-file examples/atlas-streams/stream-connection/sample-stream-connection.json \ - --stack-name stream-connection-sample \ - --parameter-overrides \ - ProjectId=YOUR_PROJECT_ID \ - WorkspaceName=YOUR_WORKSPACE_NAME \ - ConnectionName=sample_stream_solar \ - --capabilities CAPABILITY_IAM \ - --region us-east-1 -``` - -**Verify with Atlas CLI:** -```bash -# List all stream connections -atlas streams connections list --projectId - -# Get sample connection details -atlas streams connections get sample_stream_solar --projectId -``` - -**Expected Output:** -- Connection should appear with Type: "Sample" -- ConnectionName should be "sample_stream_solar" - -## Field Mapping: CFN Properties → Atlas API - -| CFN Property | Atlas API Field | Notes | -|-------------|----------------|-------| -| `ProjectId` | `groupId` | 24-hexadecimal character project ID | -| `WorkspaceName` | `tenantName` | Stream workspace name (preferred over InstanceName) | -| `InstanceName` | `tenantName` | Deprecated - use WorkspaceName instead | -| `ConnectionName` | `name` | Unique connection name within workspace | -| `Type` | `type` | Connection type: Cluster, Kafka, Sample, AWSLambda, Https | -| `ClusterName` | `clusterName` | Required for Cluster type | -| `DbRoleToExecute.Role` | `dbRoleToExecute.role` | Database role name | -| `DbRoleToExecute.Type` | `dbRoleToExecute.type` | BUILT_IN or CUSTOM | -| `BootstrapServers` | `bootstrapServers` | Required for Kafka type | -| `Authentication.Mechanism` | `authentication.mechanism` | PLAIN, SCRAM-256, SCRAM-512, OAUTHBEARER | -| `Security.Protocol` | `security.protocol` | PLAINTEXT or SSL | - -## Template Validation - -Before 
deploying, validate the template syntax: - -```bash -# Validate cluster connection template -aws cloudformation validate-template \ - --template-body file://examples/atlas-streams/stream-connection/cluster-stream-connection.json - -# Validate Kafka connection template -aws cloudformation validate-template \ - --template-body file://examples/atlas-streams/stream-connection/kafka-stream-connection.json - -# Validate sample connection template -aws cloudformation validate-template \ - --template-body file://examples/atlas-streams/stream-connection/sample-stream-connection.json -``` - -If validation succeeds, the command will return JSON with template parameters and description. Any syntax errors will be reported. - -## Atlas CLI Validation - -After deploying a stack, verify the connection was created correctly: - -### 1. List All Connections -```bash -atlas streams connections list --projectId --output json -``` - -**Expected**: Your connection should appear in the list with: -- `name`: Matches your ConnectionName parameter -- `type`: Matches your connection type (Cluster, Kafka, or Sample) - -### 2. Get Connection Details -```bash -atlas streams connections get --projectId --output json -``` - -**For Cluster connections, verify:** -- `type` = "Cluster" -- `clusterName` = Your cluster name -- `dbRoleToExecute.role` = Your DbRole parameter -- `dbRoleToExecute.type` = Your DbRoleType parameter - -**For Kafka connections, verify:** -- `type` = "Kafka" -- `bootstrapServers` = Your BootstrapServers parameter -- `authentication.mechanism` = Your AuthMechanism parameter -- `security.protocol` = Your SecurityProtocol parameter - -**For Sample connections, verify:** -- `type` = "Sample" -- `name` = Your ConnectionName parameter (typically "sample_stream_solar") - -### 3. Verify in Atlas UI -1. Navigate to your Atlas project -2. Go to Stream Processing section -3. Select your Stream Workspace -4. View Connections tab -5. 
Verify the connection appears with correct configuration - -## Cleanup - -To delete a stream connection created via CloudFormation: - -```bash -# Delete the CloudFormation stack (recommended) -aws cloudformation delete-stack --stack-name -aws cloudformation wait stack-delete-complete --stack-name - -# Or delete directly using Atlas CLI -atlas streams connections delete --projectId --force -``` - -## Notes - -- **AWS-Only**: These templates are designed for AWS CloudFormation. Provider is implicitly AWS. -- **Backward Compatibility**: The resource supports both `WorkspaceName` (preferred) and `InstanceName` (deprecated). If both are provided, `WorkspaceName` takes precedence. CFN does not enforce mutual exclusivity. -- **Primary Identifier**: The resource is uniquely identified by: `ProjectId`, `ConnectionName`, `WorkspaceName`, and `Profile`. -- **Sensitive Fields**: Passwords and secrets should be managed through AWS Secrets Manager or CloudFormation parameters with `NoEcho: true`. -- **Required Resources**: Ensure Stream Workspace and (for Cluster connections) Atlas Cluster exist before deploying connection templates. +# How to create a MongoDB::Atlas::StreamConnection + +## Step 1: Activate the stream connection resource in cloudformation + Step a: Create Role using [execution-role.yaml](https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/examples/execution-role.yaml) in CFN resources folder. + + Step b: Search for Mongodb::Atlas::StreamConnection resource. + + (CloudFormation > Public extensions > choose 'Third party' > Search with " Execution name prefix = MongoDB " ) + Step c: Select and activate + Enter the RoleArn that is created in step 1. + + Your StreamConnection Resource is ready to use. 
+ +## Step 2: Create template using example JSON files + Examples for each connection type: + + **Cluster type** - Connect to an Atlas cluster: + - [cluster-stream-connection.json](cluster-stream-connection.json) + + **Kafka type** - Connect to a Kafka cluster: + - [kafka-stream-connection.json](kafka-stream-connection.json) + - [kafka-oauth-stream-connection.json](kafka-oauth-stream-connection.json) + + **Sample type** - Use sample datasets: + - [sample-stream-connection.json](sample-stream-connection.json) + + **AWSLambda type** - Connect to AWS Lambda: + - [aws-lambda-stream-connection.json](aws-lambda-stream-connection.json) + + **Https type** - Connect via HTTPS: + - [https-stream-connection.json](https-stream-connection.json) + + Note: Make sure you are providing appropriate values for: + 1. ProjectId + 2. WorkspaceName (or InstanceName - deprecated) + 3. ConnectionName + 4. Type: Cluster, Kafka, Sample, AWSLambda, or Https + 5. Profile (optional) + 6. Type-specific fields (ClusterName for Cluster type, BootstrapServers for Kafka type, etc.) From 4960e83c7d48493e2cac2587d34e21abe3977768 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Fri, 16 Jan 2026 11:45:51 -0500 Subject: [PATCH 06/10] CLOUDP-369806-stream-connection internal review handling --- .../cmd/resource/mappings_test.go | 41 ++++++++++++++++++- 1 file changed, 40 insertions(+), 1 deletion(-) diff --git a/cfn-resources/stream-connection/cmd/resource/mappings_test.go b/cfn-resources/stream-connection/cmd/resource/mappings_test.go index d34b22ea0..fecac110a 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings_test.go @@ -1,4 +1,4 @@ -// Copyright 2026 MongoDB Inc +// Copyright 2024 MongoDB Inc // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
@@ -38,6 +38,8 @@ const ( testBootstrap = "local.example.com:9192" testUser = "user1" testSampleName = "sample_stream_solar" + testRoleArn = "arn:aws:iam::123456789012:role/test-lambda-role" + testUrl = "https://api.example.com/stream" ) func TestMappings(t *testing.T) { @@ -133,6 +135,43 @@ func TestMappings(t *testing.T) { assert.Nil(t, result.DbRoleToExecute) }, }, + "GetStreamConnectionModel_awsLambda": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.AWSLambdaType), + Aws: &admin.StreamsAWSConnectionConfig{ + RoleArn: ptr.String(testRoleArn), + }, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.AWSLambdaType, *result.Type) + assert.NotNil(t, result.Aws) + assert.Equal(t, testRoleArn, *result.Aws.RoleArn) + }, + }, + "GetStreamConnectionModel_https": { + testFunc: func(t *testing.T) { + t.Helper() + testHeaders := map[string]string{ + "Authorization": "Bearer token123", + "Content-Type": "application/json", + } + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.HTTPSType), + Url: ptr.String(testUrl), + Headers: &testHeaders, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.HTTPSType, *result.Type) + assert.Equal(t, testUrl, *result.Url) + assert.NotNil(t, result.Headers) + assert.Equal(t, "Bearer token123", result.Headers["Authorization"]) + assert.Equal(t, "application/json", result.Headers["Content-Type"]) + }, + }, } for name, tc := range testCases { From a67f7b326a89e8c189f5cd65c5d50563288e2d6f Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Fri, 16 Jan 2026 15:53:27 -0500 Subject: [PATCH 07/10] CLOUDP-369806-stream-connection Lint Fix --- .../stream-connection/cmd/resource/mappings_test.go | 
6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/cfn-resources/stream-connection/cmd/resource/mappings_test.go b/cfn-resources/stream-connection/cmd/resource/mappings_test.go index fecac110a..0ab5b690a 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings_test.go @@ -39,7 +39,7 @@ const ( testUser = "user1" testSampleName = "sample_stream_solar" testRoleArn = "arn:aws:iam::123456789012:role/test-lambda-role" - testUrl = "https://api.example.com/stream" + testURL = "https://api.example.com/stream" ) func TestMappings(t *testing.T) { @@ -160,13 +160,13 @@ func TestMappings(t *testing.T) { } streamsConn := &admin.StreamsConnection{ Name: ptr.String(testConnection), Type: ptr.String(resource.HTTPSType), - Url: ptr.String(testUrl), + Url: ptr.String(testURL), Headers: &testHeaders, } result := resource.GetStreamConnectionModel(streamsConn, nil) assert.Equal(t, testConnection, *result.ConnectionName) assert.Equal(t, resource.HTTPSType, *result.Type) - assert.Equal(t, testUrl, *result.Url) + assert.Equal(t, testURL, *result.Url) assert.NotNil(t, result.Headers) assert.Equal(t, "Bearer token123", result.Headers["Authorization"]) assert.Equal(t, "application/json", result.Headers["Content-Type"]) From e9620d35e367d1d20f54330f10b71c7ab2353c62 Mon Sep 17 00:00:00 2001 From: sivaram-mongodb Date: Mon, 19 Jan 2026 09:41:05 +0530 Subject: [PATCH 08/10] simplify the aws lambda setup --- .../mongodb-atlas-streamconnection.json | 2 +- .../test/cfn-test-create-inputs.sh | 100 ++++++------------ .../test/cfn-test-delete-inputs.sh | 68 ++++-------- .../test/inputs_4_create.json | 2 +- .../test/inputs_4_update.json | 2 +- 5 files changed, 55 insertions(+), 119 deletions(-) diff --git a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json index d1da0de12..defc2228d 100644 --- 
a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json +++ b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json @@ -160,7 +160,7 @@ }, "InstanceName": { "type": "string", - "description": "Human-readable label that identifies the stream instance. Deprecated: Use WorkspaceName instead." + "description": "Human-readable label that identifies the stream instance. WARNING: This field is deprecated and will be removed in the next major release. Please use WorkspaceName instead." }, "WorkspaceName": { "type": "string", diff --git a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh index 39329c4aa..926d49033 100755 --- a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh @@ -49,90 +49,52 @@ echo -e "Created Cluster \"${clusterName}\"" # AWS IAM role creation and authorization for Lambda connections echo "--------------------------------AWS Lambda IAM Role creation starts ----------------------------" -# Role names for CREATE and UPDATE scenarios -iamRoleNameCreate="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}" -iamRoleNameUpdate="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}-updated" -policyName="atlas-lambda-invoke-policy" +# Single IAM role for both CREATE and UPDATE scenarios (following Terraform pattern) +iamRoleName="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}" -echo "Creating IAM roles: ${iamRoleNameCreate} and ${iamRoleNameUpdate}" +echo "Creating IAM role: ${iamRoleName}" -# Create first cloud provider access entry (for CREATE role) -roleIdCreate=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output json | jq -r '.roleId') -echo "Created Atlas cloud provider access entry for CREATE role: ${roleIdCreate}" +# Create cloud provider access entry +roleId=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output 
json | jq -r '.roleId') +echo "Created Atlas cloud provider access entry: ${roleId}" -# Create second cloud provider access entry (for UPDATE role) -roleIdUpdate=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output json | jq -r '.roleId') -echo "Created Atlas cloud provider access entry for UPDATE role: ${roleIdUpdate}" +# Get Atlas AWS Account ARN and External ID +atlasAWSAccountArn=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleId}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAWSAccountArn') +atlasAssumedRoleExternalId=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleId}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') -# Get Atlas AWS Account ARN and External ID for CREATE role -atlasAWSAccountArnCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdCreate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAWSAccountArn') -atlasAssumedRoleExternalIdCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdCreate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') - -# Get Atlas AWS Account ARN and External ID for UPDATE role -atlasAWSAccountArnUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdUpdate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAWSAccountArn') -atlasAssumedRoleExternalIdUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleIdUpdate}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') - -# Create trust policy for CREATE role -jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalIdCreate" \ - --arg 
atlasAWSAccountArn "$atlasAWSAccountArnCreate" \ +# Create trust policy +jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalId" \ + --arg atlasAWSAccountArn "$atlasAWSAccountArn" \ '.Statement[0].Principal.AWS?|=$atlasAWSAccountArn | .Statement[0].Condition.StringEquals["sts:ExternalId"]?|=$atlasAssumedRoleExternalId' \ - "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy-create.json" - -# Create trust policy for UPDATE role -jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalIdUpdate" \ - --arg atlasAWSAccountArn "$atlasAWSAccountArnUpdate" \ - '.Statement[0].Principal.AWS?|=$atlasAWSAccountArn | .Statement[0].Condition.StringEquals["sts:ExternalId"]?|=$atlasAssumedRoleExternalId' \ - "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy-update.json" + "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy.json" echo "--------------------------------AWS IAM Role creation starts ----------------------------" -# Check if CREATE role exists, delete if found -awsRoleIdCreate=$(aws iam get-role --role-name "${iamRoleNameCreate}" 2>/dev/null | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") -if [ -n "$awsRoleIdCreate" ]; then - aws iam delete-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" 2>/dev/null || true - aws iam delete-role --role-name "${iamRoleNameCreate}" - echo "Deleted existing CREATE role" +# Check if role exists, delete if found +awsRoleId=$(aws iam get-role --role-name "${iamRoleName}" 2>/dev/null | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") +if [ -n "$awsRoleId" ]; then + aws iam delete-role --role-name "${iamRoleName}" + echo "Deleted existing role" fi -# Create CREATE role -awsRoleIdCreate=$(aws iam create-role --role-name "${iamRoleNameCreate}" --assume-role-policy-document 
file://"$(dirname "$0")"/lambda-trust-policy-create.json | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .RoleId') -echo "Created AWS IAM role for CREATE: ${awsRoleIdCreate}" +# Create IAM role +awsRoleId=$(aws iam create-role --role-name "${iamRoleName}" --assume-role-policy-document file://"$(dirname "$0")"/lambda-trust-policy.json | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .RoleId') +echo "Created AWS IAM role: ${awsRoleId}" -# Check if UPDATE role exists, delete if found -awsRoleIdUpdate=$(aws iam get-role --role-name "${iamRoleNameUpdate}" 2>/dev/null | jq --arg roleName "${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") -if [ -n "$awsRoleIdUpdate" ]; then - aws iam delete-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" 2>/dev/null || true - aws iam delete-role --role-name "${iamRoleNameUpdate}" - echo "Deleted existing UPDATE role" -fi - -# Create UPDATE role -awsRoleIdUpdate=$(aws iam create-role --role-name "${iamRoleNameUpdate}" --assume-role-policy-document file://"$(dirname "$0")"/lambda-trust-policy-update.json | jq --arg roleName "${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .RoleId') -echo "Created AWS IAM role for UPDATE: ${awsRoleIdUpdate}" - -# Get role ARNs -awsArnCreate=$(aws iam get-role --role-name "${iamRoleNameCreate}" | jq --arg roleName "${iamRoleNameCreate}" -r '.Role | select(.RoleName==$roleName) | .Arn') -awsArnUpdate=$(aws iam get-role --role-name "${iamRoleNameUpdate}" | jq --arg roleName "${iamRoleNameUpdate}" -r '.Role | select(.RoleName==$roleName) | .Arn') - -# Attach Lambda permissions to both roles -aws iam put-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" --policy-document file://"$(dirname "$0")"/lambda-permissions-template.json -aws iam put-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" --policy-document 
file://"$(dirname "$0")"/lambda-permissions-template.json -echo "Attached Lambda invoke permissions to both roles" +# Get role ARN +awsArn=$(aws iam get-role --role-name "${iamRoleName}" | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .Arn') echo "--------------------------------AWS IAM Role creation ends ----------------------------" # Wait for AWS IAM role to propagate (similar to encryption-at-rest pattern) -echo "Waiting for IAM roles to propagate..." +echo "Waiting for IAM role to propagate..." sleep 65 -# Authorize the roles in Atlas -echo "--------------------------------Authorize MongoDB Atlas Roles starts ----------------------------" -atlas cloudProviders accessRoles aws authorize "${roleIdCreate}" --iamAssumedRoleArn "${awsArnCreate}" --projectId "${projectId}" -echo "Authorized CREATE role: ${iamRoleNameCreate}" - -atlas cloudProviders accessRoles aws authorize "${roleIdUpdate}" --iamAssumedRoleArn "${awsArnUpdate}" --projectId "${projectId}" -echo "Authorized UPDATE role: ${iamRoleNameUpdate}" -echo "--------------------------------Authorize MongoDB Atlas Roles ends ----------------------------" +# Authorize the role in Atlas +echo "--------------------------------Authorize MongoDB Atlas Role starts ----------------------------" +atlas cloudProviders accessRoles aws authorize "${roleId}" --iamAssumedRoleArn "${awsArn}" --projectId "${projectId}" +echo "Authorized role: ${iamRoleName}" +echo "--------------------------------Authorize MongoDB Atlas Role ends ----------------------------" jq --arg cluster_name "$clusterName" \ --arg workspace_name "$workspaceName" \ @@ -187,7 +149,7 @@ jq --arg workspace_name "$workspaceName" \ jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ - --arg role_arn "$awsArnCreate" \ + --arg role_arn "$awsArn" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name @@ -197,7 +159,7 @@ jq --arg workspace_name 
"$workspaceName" \ jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ - --arg role_arn "$awsArnUpdate" \ + --arg role_arn "$awsArn" \ '.Profile?|=$profile | .ProjectId?|=$project_id | .WorkspaceName?|=$workspace_name diff --git a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh index 247354ab2..06a29af75 100755 --- a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh @@ -45,69 +45,43 @@ else echo "failed to delete the stream workspace/instance with name ${workspaceOrInstanceName}" fi -# Delete AWS Lambda IAM roles if they exist -echo "--------------------------------delete AWS Lambda IAM roles starts ----------------------------" +# Delete AWS Lambda IAM role if it exists +echo "--------------------------------delete AWS Lambda IAM role starts ----------------------------" # Check if Lambda input files exist if [ -f "./inputs/inputs_4_create.json" ]; then - echo "Found Lambda connection inputs, cleaning up IAM roles..." + echo "Found Lambda connection inputs, cleaning up IAM role..." 
- policyName="atlas-lambda-invoke-policy" - - # Extract role ARN from CREATE input file - roleArnCreate=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_create.json) - # Extract role name from ARN (everything after the last '/') - iamRoleNameCreate=$(echo "${roleArnCreate}" | awk -F'/' '{print $NF}') - - # Extract role ARN from UPDATE input file - roleArnUpdate=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_update.json) + # Extract role ARN from CREATE input file (same role used for both CREATE and UPDATE) + roleArn=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_create.json) # Extract role name from ARN (everything after the last '/') - iamRoleNameUpdate=$(echo "${roleArnUpdate}" | awk -F'/' '{print $NF}') + iamRoleName=$(echo "${roleArn}" | awk -F'/' '{print $NF}') - # Get external IDs from trust policy files and find roleIds in Atlas - if [ -f "$(dirname "$0")/lambda-trust-policy-create.json" ]; then - atlasAssumedRoleExternalIdCreate=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy-create.json") - roleIdCreate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalIdCreate}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') + # Get external ID from trust policy file and find roleId in Atlas + if [ -f "$(dirname "$0")/lambda-trust-policy.json" ]; then + atlasAssumedRoleExternalId=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy.json") + roleId=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalId}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') - if [ -n "${roleIdCreate}" ] && [ "${roleIdCreate}" != "null" ]; then - echo "Deauthorizing CREATE role from Atlas: ${roleIdCreate}" - atlas cloudProviders accessRoles aws deauthorize 
"${roleIdCreate}" --projectId "${projectId}" --force || echo "Failed to deauthorize CREATE role" + if [ -n "${roleId}" ] && [ "${roleId}" != "null" ]; then + echo "Deauthorizing role from Atlas: ${roleId}" + atlas cloudProviders accessRoles aws deauthorize "${roleId}" --projectId "${projectId}" --force || echo "Failed to deauthorize role" fi fi - if [ -f "$(dirname "$0")/lambda-trust-policy-update.json" ]; then - atlasAssumedRoleExternalIdUpdate=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy-update.json") - roleIdUpdate=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalIdUpdate}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') - - if [ -n "${roleIdUpdate}" ] && [ "${roleIdUpdate}" != "null" ]; then - echo "Deauthorizing UPDATE role from Atlas: ${roleIdUpdate}" - atlas cloudProviders accessRoles aws deauthorize "${roleIdUpdate}" --projectId "${projectId}" --force || echo "Failed to deauthorize UPDATE role" - fi - fi - - # Delete CREATE IAM role - if [ -n "${iamRoleNameCreate}" ] && [ "${iamRoleNameCreate}" != "null" ] && [ "${iamRoleNameCreate}" != "" ]; then - echo "Deleting CREATE IAM role: ${iamRoleNameCreate}" - aws iam delete-role-policy --role-name "${iamRoleNameCreate}" --policy-name "${policyName}" 2>/dev/null || echo "Policy already deleted or doesn't exist" - aws iam delete-role --role-name "${iamRoleNameCreate}" 2>/dev/null || echo "Role already deleted or doesn't exist" - fi - - # Delete UPDATE IAM role - if [ -n "${iamRoleNameUpdate}" ] && [ "${iamRoleNameUpdate}" != "null" ] && [ "${iamRoleNameUpdate}" != "" ]; then - echo "Deleting UPDATE IAM role: ${iamRoleNameUpdate}" - aws iam delete-role-policy --role-name "${iamRoleNameUpdate}" --policy-name "${policyName}" 2>/dev/null || echo "Policy already deleted or doesn't exist" - aws iam delete-role --role-name "${iamRoleNameUpdate}" 
2>/dev/null || echo "Role already deleted or doesn't exist" + # Delete IAM role + if [ -n "${iamRoleName}" ] && [ "${iamRoleName}" != "null" ] && [ "${iamRoleName}" != "" ]; then + echo "Deleting IAM role: ${iamRoleName}" + aws iam delete-role --role-name "${iamRoleName}" 2>/dev/null || echo "Role already deleted or doesn't exist" fi - # Clean up temporary files - rm -f "$(dirname "$0")/lambda-trust-policy-create.json" - rm -f "$(dirname "$0")/lambda-trust-policy-update.json" + # Clean up temporary file + rm -f "$(dirname "$0")/lambda-trust-policy.json" - echo "Cleaned up Lambda IAM roles and temporary files" + echo "Cleaned up Lambda IAM role and temporary files" else echo "No Lambda connection inputs found, skipping IAM role cleanup" fi -echo "--------------------------------delete AWS Lambda IAM roles ends ----------------------------" +echo "--------------------------------delete AWS Lambda IAM role ends ----------------------------" #delete project if atlas projects delete "$projectId" --force; then diff --git a/cfn-resources/stream-connection/test/inputs_4_create.json b/cfn-resources/stream-connection/test/inputs_4_create.json index cc17a61f6..b99f6f9f6 100644 --- a/cfn-resources/stream-connection/test/inputs_4_create.json +++ b/cfn-resources/stream-connection/test/inputs_4_create.json @@ -5,7 +5,7 @@ "ConnectionName": "ConnectionNameAWSLambda", "Type": "AWSLambda", "Aws": { - "RoleArn": "arn:aws:iam::263641576157:role/mongodb-atlas-streams-lambda-new" + "RoleArn": "" } } diff --git a/cfn-resources/stream-connection/test/inputs_4_update.json b/cfn-resources/stream-connection/test/inputs_4_update.json index 08135e379..b99f6f9f6 100644 --- a/cfn-resources/stream-connection/test/inputs_4_update.json +++ b/cfn-resources/stream-connection/test/inputs_4_update.json @@ -5,7 +5,7 @@ "ConnectionName": "ConnectionNameAWSLambda", "Type": "AWSLambda", "Aws": { - "RoleArn": "arn:aws:iam::263641576157:role/mongodb-atlas-streams-lambda-new-updated" + "RoleArn": "" } } From 
68da0b1cbbf008e36de4264b5f5c9458569b62e0 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Mon, 19 Jan 2026 21:36:43 -0500 Subject: [PATCH 09/10] CLOUDP-369806-stream-connection Handling api team review comments --- .../cmd/resource/resource.go | 76 +++++++------------ 1 file changed, 27 insertions(+), 49 deletions(-) diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index 81b1c73c1..6db92ae9c 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -43,29 +43,26 @@ var UpdateRequiredFields = []string{constants.ProjectID, constants.ConnectionNam var DeleteRequiredFields = []string{constants.ProjectID, constants.ConnectionName} var ListRequiredFields = []string{constants.ProjectID} -func getWorkspaceOrInstanceName(model *Model) (*string, *handler.ProgressEvent) { - if model.WorkspaceName != nil && *model.WorkspaceName != "" { - return model.WorkspaceName, nil - } - if model.InstanceName != nil && *model.InstanceName != "" { - return model.InstanceName, nil - } - return nil, &handler.ProgressEvent{ - OperationStatus: handler.Failed, - Message: "Either WorkspaceName or InstanceName must be provided", - HandlerErrorCode: string(types.HandlerErrorCodeInvalidRequest), - } -} +func normalizeWorkspaceName(model *Model) *handler.ProgressEvent { + var workspaceOrInstanceName *string -func normalizeWorkspaceName(model *Model) { - if model != nil { - if model.WorkspaceName != nil && *model.WorkspaceName != "" { - return - } - if model.InstanceName != nil && *model.InstanceName != "" { - model.WorkspaceName = model.InstanceName + // Validate that at least one of WorkspaceName or InstanceName is provided + if model.WorkspaceName != nil && *model.WorkspaceName != "" { + workspaceOrInstanceName = model.WorkspaceName + } else if model.InstanceName != nil && *model.InstanceName != "" { + workspaceOrInstanceName = 
model.InstanceName + } else { + return &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Either WorkspaceName or InstanceName must be provided", + HandlerErrorCode: string(types.HandlerErrorCodeInvalidRequest), } } + + // Ensure both WorkspaceName and InstanceName are set for backward compatibility + model.WorkspaceName = workspaceOrInstanceName + model.InstanceName = workspaceOrInstanceName + return nil } var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { @@ -77,7 +74,9 @@ var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, req return nil, errEvent } - normalizeWorkspaceName(currentModel) + if peErr := normalizeWorkspaceName(currentModel); peErr != nil { + return nil, peErr + } client, peErr := util.NewAtlasClient(&req, currentModel.Profile) if peErr != nil { @@ -92,10 +91,7 @@ func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler return *peErr, nil } - workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) - if peErr != nil { - return *peErr, nil - } + workspaceOrInstanceName := currentModel.WorkspaceName ctx := context.Background() @@ -122,10 +118,7 @@ func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.P return *peErr, nil } - workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) - if peErr != nil { - return *peErr, nil - } + workspaceOrInstanceName := currentModel.WorkspaceName projectID := currentModel.ProjectId connectionName := currentModel.ConnectionName @@ -148,10 +141,7 @@ func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler return *peErr, nil } - workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) - if peErr != nil { - return *peErr, nil - } + workspaceOrInstanceName := currentModel.WorkspaceName ctx := context.Background() @@ -178,10 +168,7 @@ func Delete(req 
handler.Request, prevModel *Model, currentModel *Model) (handler return *peErr, nil } - workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) - if peErr != nil { - return *peErr, nil - } + workspaceOrInstanceName := currentModel.WorkspaceName ctx := context.Background() @@ -204,10 +191,7 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P return *peErr, nil } - workspaceOrInstanceName, peErr := getWorkspaceOrInstanceName(currentModel) - if peErr != nil { - return *peErr, nil - } + workspaceOrInstanceName := currentModel.WorkspaceName ctx := context.Background() @@ -222,15 +206,9 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P for i := range accumulatedStreamConns { model := GetStreamConnectionModel(&accumulatedStreamConns[i], nil) model.ProjectId = currentModel.ProjectId - // Set both WorkspaceName and InstanceName for consistency and backward compatibility // InstanceName is deprecated but we maintain it for backward compatibility - if currentModel.WorkspaceName != nil { - model.WorkspaceName = currentModel.WorkspaceName - model.InstanceName = currentModel.WorkspaceName - } else if currentModel.InstanceName != nil { - model.WorkspaceName = currentModel.InstanceName - model.InstanceName = currentModel.InstanceName - } + model.WorkspaceName = workspaceOrInstanceName + model.InstanceName = workspaceOrInstanceName model.Profile = currentModel.Profile response = append(response, model) From 8ecd0b0912c75dc2d116bcf62dac187c876fe864 Mon Sep 17 00:00:00 2001 From: ParthasarathyV Date: Mon, 19 Jan 2026 21:41:19 -0500 Subject: [PATCH 10/10] CLOUDP-369806-stream-connection Handling api team review comments --- cfn-resources/stream-connection/cmd/resource/resource.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index 6db92ae9c..50ac22f8a 100644 
--- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -47,11 +47,12 @@ func normalizeWorkspaceName(model *Model) *handler.ProgressEvent { var workspaceOrInstanceName *string // Validate that at least one of WorkspaceName or InstanceName is provided - if model.WorkspaceName != nil && *model.WorkspaceName != "" { + switch { + case model.WorkspaceName != nil && *model.WorkspaceName != "": workspaceOrInstanceName = model.WorkspaceName - } else if model.InstanceName != nil && *model.InstanceName != "" { + case model.InstanceName != nil && *model.InstanceName != "": workspaceOrInstanceName = model.InstanceName - } else { + default: return &handler.ProgressEvent{ OperationStatus: handler.Failed, Message: "Either WorkspaceName or InstanceName must be provided",