diff --git a/cfn-resources/stream-connection/README.md b/cfn-resources/stream-connection/README.md index 18f9db2c3..407b65df8 100644 --- a/cfn-resources/stream-connection/README.md +++ b/cfn-resources/stream-connection/README.md @@ -11,6 +11,52 @@ For instructions on setting up a profile, [see here](/README.md#mongodb-atlas-ap See the [resource docs](docs/README.md). Also refer [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials. -## Cloudformation Examples +## CloudFormation Examples -See the example [CFN Template](/examples/stream-connection/kafka-stream-connection.json) for example resource. +Example templates are available in the [examples directory](/examples/atlas-streams/stream-connection/): + +- **Cluster Connection**: [cluster-stream-connection.json](/examples/atlas-streams/stream-connection/cluster-stream-connection.json) - Connects a Stream Workspace to an Atlas cluster +- **Kafka Connection**: [kafka-stream-connection.json](/examples/atlas-streams/stream-connection/kafka-stream-connection.json) - Connects a Stream Workspace to a Kafka cluster +- **Sample Connection**: [sample-stream-connection.json](/examples/atlas-streams/stream-connection/sample-stream-connection.json) - Uses a sample dataset + +For detailed deployment and verification instructions, see the [examples README](/examples/atlas-streams/stream-connection/README.md). + +## Deployment + +### Prerequisites +1. An existing Atlas project +2. An existing Stream Workspace (create using `atlas streams instances create`) +3. For Cluster connections: An existing Atlas cluster +4. AWS credentials configured with CloudFormation permissions +5. 
Atlas API keys stored in AWS Secrets Manager + +### Deploy Example + +```bash +aws cloudformation deploy \ + --template-file examples/atlas-streams/stream-connection/cluster-stream-connection.json \ + --stack-name my-stream-connection \ + --parameter-overrides \ + ProjectId=YOUR_PROJECT_ID \ + WorkspaceName=YOUR_WORKSPACE_NAME \ + ConnectionName=my-connection \ + ClusterName=YOUR_CLUSTER_NAME \ + DbRole=atlasAdmin \ + DbRoleType=BUILT_IN \ + --capabilities CAPABILITY_IAM \ + --region us-east-1 +``` + +## Verification + +After deployment, verify the connection using Atlas CLI: + +```bash +# List all connections for the workspace +atlas streams connections list --projectId YOUR_PROJECT_ID --instance YOUR_WORKSPACE_NAME + +# Get specific connection details +atlas streams connections describe my-connection --projectId YOUR_PROJECT_ID --instance YOUR_WORKSPACE_NAME +``` + +The connection should appear in the list with the correct type and configuration matching your CloudFormation template parameters. diff --git a/cfn-resources/stream-connection/cmd/resource/mappings.go b/cfn-resources/stream-connection/cmd/resource/mappings.go index b0e379b24..961acd126 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings.go @@ -15,26 +15,37 @@ package resource import ( - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" ) -func GetStreamConnectionModel(streamsConn *admin20231115014.StreamsConnection, currentModel *Model) *Model { - model := new(Model) +func GetStreamConnectionModel(streamsConn *admin.StreamsConnection, currentModel *Model) *Model { + var model *Model if currentModel != nil { model = currentModel + if model.WorkspaceName == nil && model.InstanceName != nil && *model.InstanceName != "" { + model.WorkspaceName = model.InstanceName + } + } else { + model = new(Model) } model.ConnectionName = streamsConn.Name model.Type = streamsConn.Type model.ClusterName = streamsConn.ClusterName + 
if streamsConn.ClusterGroupId != nil { + model.ClusterProjectId = streamsConn.ClusterGroupId + } model.BootstrapServers = streamsConn.BootstrapServers + if streamsConn.Url != nil { + model.Url = streamsConn.Url + } model.DbRoleToExecute = NewModelDBRoleToExecute(streamsConn.DbRoleToExecute) - model.Authentication = NewModelAuthentication(streamsConn.Authentication) + model.Authentication = NewModelAuthentication(streamsConn.Authentication, currentModel) model.Security = NewModelSecurity(streamsConn.Security) @@ -42,10 +53,29 @@ func GetStreamConnectionModel(streamsConn *admin20231115014.StreamsConnection, c model.Config = *streamsConn.Config } + if streamsConn.Headers != nil { + model.Headers = *streamsConn.Headers + } + + if streamsConn.Networking != nil && streamsConn.Networking.Access != nil { + model.Networking = &Networking{ + Access: &Access{ + Type: streamsConn.Networking.Access.Type, + ConnectionId: streamsConn.Networking.Access.ConnectionId, + }, + } + } + + if streamsConn.Aws != nil { + model.Aws = &Aws{ + RoleArn: streamsConn.Aws.RoleArn, + } + } + return model } -func NewModelDBRoleToExecute(dbRole *admin20231115014.DBRoleToExecute) *DBRoleToExecute { +func NewModelDBRoleToExecute(dbRole *admin.DBRoleToExecute) *DBRoleToExecute { if dbRole == nil { return nil } @@ -56,19 +86,25 @@ func NewModelDBRoleToExecute(dbRole *admin20231115014.DBRoleToExecute) *DBRoleTo } } -func NewModelAuthentication(authentication *admin20231115014.StreamsKafkaAuthentication) *StreamsKafkaAuthentication { +func NewModelAuthentication(authentication *admin.StreamsKafkaAuthentication, currentModel *Model) *StreamsKafkaAuthentication { if authentication == nil { return nil } - return &StreamsKafkaAuthentication{ - Mechanism: authentication.Mechanism, - Password: authentication.Password, - Username: authentication.Username, + authModel := &StreamsKafkaAuthentication{ + Mechanism: authentication.Mechanism, + Method: authentication.Method, + Username: authentication.Username, + 
TokenEndpointUrl: authentication.TokenEndpointUrl, + ClientId: authentication.ClientId, + Scope: authentication.Scope, + SaslOauthbearerExtensions: authentication.SaslOauthbearerExtensions, } + + return authModel } -func NewModelSecurity(security *admin20231115014.StreamsKafkaSecurity) *StreamsKafkaSecurity { +func NewModelSecurity(security *admin.StreamsKafkaSecurity) *StreamsKafkaSecurity { if security == nil { return nil } @@ -79,18 +115,23 @@ func NewModelSecurity(security *admin20231115014.StreamsKafkaSecurity) *StreamsK } } -func newStreamConnectionReq(model *Model) *admin20231115014.StreamsConnection { - streamConnReq := admin20231115014.StreamsConnection{ +func newStreamConnectionReq(model *Model) *admin.StreamsConnection { + streamConnReq := admin.StreamsConnection{ Name: model.ConnectionName, Type: model.Type, } - if util.SafeString(streamConnReq.Type) == ClusterConnectionType { + typeStr := util.SafeString(streamConnReq.Type) + + if typeStr == ClusterConnectionType { streamConnReq.ClusterName = model.ClusterName + if model.ClusterProjectId != nil { + streamConnReq.ClusterGroupId = model.ClusterProjectId + } streamConnReq.DbRoleToExecute = NewDBRoleToExecute(model.DbRoleToExecute) } - if util.SafeString(streamConnReq.Type) == KafkaConnectionType { + if typeStr == KafkaConnectionType { streamConnReq.BootstrapServers = model.BootstrapServers streamConnReq.Security = newStreamsKafkaSecurity(model.Security) streamConnReq.Authentication = newStreamsKafkaAuthentication(model.Authentication) @@ -98,41 +139,71 @@ func newStreamConnectionReq(model *Model) *admin20231115014.StreamsConnection { if model.Config != nil { streamConnReq.Config = &model.Config } + + if model.Networking != nil && model.Networking.Access != nil { + streamConnReq.Networking = &admin.StreamsKafkaNetworking{ + Access: &admin.StreamsKafkaNetworkingAccess{ + Type: model.Networking.Access.Type, + ConnectionId: model.Networking.Access.ConnectionId, + }, + } + } + } + + if typeStr == AWSLambdaType 
{ + if model.Aws != nil { + streamConnReq.Aws = &admin.StreamsAWSConnectionConfig{ + RoleArn: model.Aws.RoleArn, + } + } + } + + if typeStr == HTTPSType { + streamConnReq.Url = model.Url + if model.Headers != nil { + streamConnReq.Headers = &model.Headers + } } return &streamConnReq } -func NewDBRoleToExecute(dbRoleToExecuteModel *DBRoleToExecute) *admin20231115014.DBRoleToExecute { +func NewDBRoleToExecute(dbRoleToExecuteModel *DBRoleToExecute) *admin.DBRoleToExecute { if dbRoleToExecuteModel == nil { return nil } - return &admin20231115014.DBRoleToExecute{ + return &admin.DBRoleToExecute{ Role: dbRoleToExecuteModel.Role, Type: dbRoleToExecuteModel.Type, } } -func newStreamsKafkaSecurity(securityModel *StreamsKafkaSecurity) *admin20231115014.StreamsKafkaSecurity { +func newStreamsKafkaSecurity(securityModel *StreamsKafkaSecurity) *admin.StreamsKafkaSecurity { if securityModel == nil { return nil } - return &admin20231115014.StreamsKafkaSecurity{ + return &admin.StreamsKafkaSecurity{ BrokerPublicCertificate: securityModel.BrokerPublicCertificate, Protocol: securityModel.Protocol, } } -func newStreamsKafkaAuthentication(authenticationModel *StreamsKafkaAuthentication) *admin20231115014.StreamsKafkaAuthentication { +func newStreamsKafkaAuthentication(authenticationModel *StreamsKafkaAuthentication) *admin.StreamsKafkaAuthentication { if authenticationModel == nil { return nil } - return &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: authenticationModel.Mechanism, - Password: authenticationModel.Password, - Username: authenticationModel.Username, + return &admin.StreamsKafkaAuthentication{ + Mechanism: authenticationModel.Mechanism, + Method: authenticationModel.Method, + Username: authenticationModel.Username, + Password: authenticationModel.Password, + TokenEndpointUrl: authenticationModel.TokenEndpointUrl, + ClientId: authenticationModel.ClientId, + ClientSecret: authenticationModel.ClientSecret, + Scope: authenticationModel.Scope, + 
SaslOauthbearerExtensions: authenticationModel.SaslOauthbearerExtensions, } } diff --git a/cfn-resources/stream-connection/cmd/resource/mappings_test.go b/cfn-resources/stream-connection/cmd/resource/mappings_test.go index 3bf65a700..0ab5b690a 100644 --- a/cfn-resources/stream-connection/cmd/resource/mappings_test.go +++ b/cfn-resources/stream-connection/cmd/resource/mappings_test.go @@ -17,294 +17,164 @@ package resource_test import ( "testing" - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws/smithy-go/ptr" "github.com/mongodb/mongodbatlas-cloudformation-resources/stream-connection/cmd/resource" "github.com/stretchr/testify/assert" ) -func TestNewModelDBRoleToExecute(t *testing.T) { - tests := []struct { - input *admin20231115014.DBRoleToExecute - expected *resource.DBRoleToExecute - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("readWrite"), - Type: ptr.String("BUILT_IN"), - }, - expected: &resource.DBRoleToExecute{ - Role: ptr.String("readWrite"), - Type: ptr.String("BUILT_IN"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelDBRoleToExecute(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} +const ( + testRoleValue = "readWrite" + testRoleTypeValue = "BUILT_IN" + testUsername = "testuser111" + testMechanism = "PLAIN" + testProtocol = "SSL" + testCert = "testcert" + testCustomRole = "customroleadmin" + testCustomType = "CUSTOM" + testConnection = "TestConnection" + testCluster = "TestCluster" + testBootstrap = "local.example.com:9192" + testUser = "user1" + testSampleName = "sample_stream_solar" + testRoleArn = "arn:aws:iam::123456789012:role/test-lambda-role" + testURL = "https://api.example.com/stream" +) -func TestNewModelAuthentication(t *testing.T) { - tests := []struct { - input 
*admin20231115014.StreamsKafkaAuthentication - expected *resource.StreamsKafkaAuthentication - name string +func TestMappings(t *testing.T) { + testCases := map[string]struct { + testFunc func(*testing.T) }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("testuser111"), - Password: ptr.String("testpassword"), + "NewModelDBRoleToExecute": { + testFunc: func(t *testing.T) { + t.Helper() + input := &admin.DBRoleToExecute{Role: ptr.String(testRoleValue), Type: ptr.String(testRoleTypeValue)} + result := resource.NewModelDBRoleToExecute(input) + assert.Equal(t, testRoleValue, *result.Role) + assert.Equal(t, testRoleTypeValue, *result.Type) + assert.Nil(t, resource.NewModelDBRoleToExecute(nil)) }, - expected: &resource.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("testuser111"), - Password: ptr.String("testpassword"), - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelAuthentication(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestNewModelSecurity(t *testing.T) { - tests := []struct { - input *admin20231115014.StreamsKafkaSecurity - expected *resource.StreamsKafkaSecurity - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, }, - { - name: "Valid Input", - input: &admin20231115014.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("testcert"), - Protocol: ptr.String("SSL"), - }, - expected: &resource.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("testcert"), - Protocol: ptr.String("SSL"), + "NewModelAuthentication": { + testFunc: func(t *testing.T) { + t.Helper() + input := &admin.StreamsKafkaAuthentication{ + Mechanism: ptr.String(testMechanism), Username: ptr.String(testUsername), + Password: ptr.String("test-password-placeholder"), + } + result := 
resource.NewModelAuthentication(input, nil) + assert.Equal(t, testMechanism, *result.Mechanism) + assert.Equal(t, testUsername, *result.Username) + assert.Nil(t, result.Password) + assert.Nil(t, resource.NewModelAuthentication(nil, nil)) }, }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewModelSecurity(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestNewDBRoleToExecute(t *testing.T) { - tests := []struct { - input *resource.DBRoleToExecute - expected *admin20231115014.DBRoleToExecute - name string - }{ - { - name: "Nil Input", - input: nil, - expected: nil, - }, - { - name: "Valid Input", - input: &resource.DBRoleToExecute{ - Role: ptr.String("customroleadmin"), - Type: ptr.String("CUSTOM"), - }, - expected: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("customroleadmin"), - Type: ptr.String("CUSTOM"), + "NewModelSecurity": { + testFunc: func(t *testing.T) { + t.Helper() + input := &admin.StreamsKafkaSecurity{ + BrokerPublicCertificate: ptr.String(testCert), Protocol: ptr.String(testProtocol), + } + result := resource.NewModelSecurity(input) + assert.Equal(t, testCert, *result.BrokerPublicCertificate) + assert.Equal(t, testProtocol, *result.Protocol) + assert.Nil(t, resource.NewModelSecurity(nil)) }, }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - actual := resource.NewDBRoleToExecute(tt.input) - assert.Equal(t, tt.expected, actual) - }) - } -} - -func TestGetStreamConnectionKafkaTypeModel(t *testing.T) { - streamsConnKafka := &admin20231115014.StreamsConnection{ - Name: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - BootstrapServers: ptr.String("local.example.com:9192"), - Authentication: &admin20231115014.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("user1"), - Password: ptr.String("passwrd"), - }, - Security: &admin20231115014.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("cert1"), - 
Protocol: ptr.String("SSL"), - }, - Config: &map[string]string{"retention.test": "60000"}, - } - - t.Run("With Nil Current Model", func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnKafka, nil) - - assert.NotNil(t, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, *streamsConnKafka.BootstrapServers, *result.BootstrapServers) - assert.Equal(t, *streamsConnKafka.Authentication.Mechanism, *result.Authentication.Mechanism) - assert.Equal(t, *streamsConnKafka.Security.Protocol, *result.Security.Protocol) - assert.Equal(t, map[string]string{"retention.test": "60000"}, result.Config) - }) - - t.Run("With Non-Null Current Model", func(t *testing.T) { - currentModel := &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - BootstrapServers: ptr.String("local.example.com:9192"), - Authentication: &resource.StreamsKafkaAuthentication{ - Mechanism: ptr.String("PLAIN"), - Username: ptr.String("user1"), - Password: ptr.String("passwrd"), + "NewDBRoleToExecute": { + testFunc: func(t *testing.T) { + t.Helper() + input := &resource.DBRoleToExecute{Role: ptr.String(testCustomRole), Type: ptr.String(testCustomType)} + result := resource.NewDBRoleToExecute(input) + assert.Equal(t, testCustomRole, *result.Role) + assert.Equal(t, testCustomType, *result.Type) + assert.Nil(t, resource.NewDBRoleToExecute(nil)) }, - Security: &resource.StreamsKafkaSecurity{ - BrokerPublicCertificate: ptr.String("cert1"), - Protocol: ptr.String("SSL"), + }, + "GetStreamConnectionModel_kafka": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.KafkaConnectionType), + BootstrapServers: ptr.String(testBootstrap), + 
Authentication: &admin.StreamsKafkaAuthentication{ + Mechanism: ptr.String(testMechanism), Username: ptr.String(testUser), + }, + Security: &admin.StreamsKafkaSecurity{Protocol: ptr.String(testProtocol)}, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.KafkaConnectionType, *result.Type) + assert.Equal(t, testBootstrap, *result.BootstrapServers) }, - Config: map[string]string{"retention.test": "60000"}, - } - result := resource.GetStreamConnectionModel(streamsConnKafka, currentModel) - - assert.Equal(t, currentModel, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *currentModel.InstanceName, *result.InstanceName) - assert.Equal(t, *currentModel.Profile, *result.Profile) - assert.Equal(t, *currentModel.ProjectId, *result.ProjectId) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, *streamsConnKafka.BootstrapServers, *result.BootstrapServers) - assert.Equal(t, *streamsConnKafka.Authentication.Mechanism, *result.Authentication.Mechanism) - assert.Equal(t, *streamsConnKafka.Security.Protocol, *result.Security.Protocol) - }) -} - -func TestGetStreamConnectionClusterTypeModel(t *testing.T) { - streamsConnKafka := &admin20231115014.StreamsConnection{ - Name: ptr.String("TestConnection"), - Type: ptr.String("Cluster"), - ClusterName: ptr.String("TestCluster"), - DbRoleToExecute: &admin20231115014.DBRoleToExecute{ - Role: ptr.String("admin"), - Type: ptr.String("Custom"), }, - } - - t.Run("With Nil Current Model", func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnKafka, nil) - - assert.NotNil(t, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetRole(), *result.DbRoleToExecute.Role) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetType(), 
*result.DbRoleToExecute.Type) - }) - - t.Run("With Non-Null Current Model", func(t *testing.T) { - currentModel := &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("TestConnection"), - Type: ptr.String("Kafka"), - ClusterName: ptr.String("TestCluster"), - DbRoleToExecute: &resource.DBRoleToExecute{ - Role: ptr.String("admin"), - Type: ptr.String("Custom"), + "GetStreamConnectionModel_cluster": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.ClusterConnectionType), + ClusterName: ptr.String(testCluster), + DbRoleToExecute: &admin.DBRoleToExecute{Role: ptr.String("admin"), Type: ptr.String("Custom")}, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.ClusterConnectionType, *result.Type) + assert.Equal(t, "admin", *result.DbRoleToExecute.Role) }, - } - result := resource.GetStreamConnectionModel(streamsConnKafka, currentModel) - - assert.Equal(t, currentModel, result) - assert.Equal(t, *streamsConnKafka.Name, *result.ConnectionName) - assert.Equal(t, *currentModel.InstanceName, *result.InstanceName) - assert.Equal(t, *currentModel.Profile, *result.Profile) - assert.Equal(t, *currentModel.ProjectId, *result.ProjectId) - assert.Equal(t, *streamsConnKafka.Type, *result.Type) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetRole(), *result.DbRoleToExecute.Role) - assert.Equal(t, streamsConnKafka.DbRoleToExecute.GetType(), *result.DbRoleToExecute.Type) - }) -} - -func TestGetStreamConnectionSampleTypeModel(t *testing.T) { - streamsConnSample := &admin20231115014.StreamsConnection{ - Name: ptr.String("sample_stream_solar"), - Type: ptr.String("Sample"), - } - testCases := []struct { - model *resource.Model - asserter func(input, result 
*resource.Model, a *assert.Assertions) - name string - }{ - { - name: "With Nil Current Model", - model: nil, - asserter: func(_, result *resource.Model, a *assert.Assertions) { - a.NotNil(result) - a.Equal(*streamsConnSample.Name, *result.ConnectionName) - a.Equal(*streamsConnSample.Type, *result.Type) - a.Nil(result.DbRoleToExecute) + }, + "GetStreamConnectionModel_sample": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testSampleName), Type: ptr.String("Sample"), + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testSampleName, *result.ConnectionName) + assert.Equal(t, "Sample", *result.Type) + assert.Nil(t, result.DbRoleToExecute) }, }, - { - name: "Sample Stream Solar dataset", - model: &resource.Model{ - Profile: ptr.String("default"), - ProjectId: ptr.String("testProjectID"), - InstanceName: ptr.String("TestInstance"), - ConnectionName: ptr.String("sample_stream_solar"), - Type: ptr.String("Sample"), + "GetStreamConnectionModel_awsLambda": { + testFunc: func(t *testing.T) { + t.Helper() + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.AWSLambdaType), + Aws: &admin.StreamsAWSConnectionConfig{ + RoleArn: ptr.String(testRoleArn), + }, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.AWSLambdaType, *result.Type) + assert.NotNil(t, result.Aws) + assert.Equal(t, testRoleArn, *result.Aws.RoleArn) }, - asserter: func(input, result *resource.Model, a *assert.Assertions) { - a.Equal(*input.InstanceName, *result.InstanceName) - a.Equal(*input.Profile, *result.Profile) - a.Equal(*input.ProjectId, *result.ProjectId) - a.Equal(*input.ConnectionName, *result.ConnectionName) - a.Equal(*input.Type, *result.Type) + }, + "GetStreamConnectionModel_https": { + testFunc: func(t *testing.T) { + t.Helper() + testHeaders := 
map[string]string{ + "Authorization": "Bearer token123", + "Content-Type": "application/json", + } + streamsConn := &admin.StreamsConnection{ + Name: ptr.String(testConnection), Type: ptr.String(resource.HTTPSType), + Url: ptr.String(testURL), + Headers: &testHeaders, + } + result := resource.GetStreamConnectionModel(streamsConn, nil) + assert.Equal(t, testConnection, *result.ConnectionName) + assert.Equal(t, resource.HTTPSType, *result.Type) + assert.Equal(t, testURL, *result.Url) + assert.NotNil(t, result.Headers) + assert.Equal(t, "Bearer token123", result.Headers["Authorization"]) + assert.Equal(t, "application/json", result.Headers["Content-Type"]) }, }, } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - result := resource.GetStreamConnectionModel(streamsConnSample, tc.model) - tc.asserter(tc.model, result, assert.New(t)) - }) + + for name, tc := range testCases { + t.Run(name, tc.testFunc) } } diff --git a/cfn-resources/stream-connection/cmd/resource/model.go b/cfn-resources/stream-connection/cmd/resource/model.go index 0fe7b9839..6c30646f5 100644 --- a/cfn-resources/stream-connection/cmd/resource/model.go +++ b/cfn-resources/stream-connection/cmd/resource/model.go @@ -8,13 +8,19 @@ type Model struct { Profile *string `json:",omitempty"` ConnectionName *string `json:",omitempty"` InstanceName *string `json:",omitempty"` + WorkspaceName *string `json:",omitempty"` Type *string `json:",omitempty"` ClusterName *string `json:",omitempty"` + ClusterProjectId *string `json:",omitempty"` DbRoleToExecute *DBRoleToExecute `json:",omitempty"` Authentication *StreamsKafkaAuthentication `json:",omitempty"` BootstrapServers *string `json:",omitempty"` Security *StreamsKafkaSecurity `json:",omitempty"` Config map[string]string `json:",omitempty"` + Networking *Networking `json:",omitempty"` + Aws *Aws `json:",omitempty"` + Url *string `json:",omitempty"` + Headers map[string]string `json:",omitempty"` } // DBRoleToExecute is autogenerated from the 
json schema @@ -25,9 +31,15 @@ type DBRoleToExecute struct { // StreamsKafkaAuthentication is autogenerated from the json schema type StreamsKafkaAuthentication struct { - Mechanism *string `json:",omitempty"` - Username *string `json:",omitempty"` - Password *string `json:",omitempty"` + Mechanism *string `json:",omitempty"` + Method *string `json:",omitempty"` + Username *string `json:",omitempty"` + Password *string `json:",omitempty"` + TokenEndpointUrl *string `json:",omitempty"` + ClientId *string `json:",omitempty"` + ClientSecret *string `json:",omitempty"` + Scope *string `json:",omitempty"` + SaslOauthbearerExtensions *string `json:",omitempty"` } // StreamsKafkaSecurity is autogenerated from the json schema @@ -35,3 +47,19 @@ type StreamsKafkaSecurity struct { BrokerPublicCertificate *string `json:",omitempty"` Protocol *string `json:",omitempty"` } + +// Networking is autogenerated from the json schema +type Networking struct { + Access *Access `json:",omitempty"` +} + +// Access is autogenerated from the json schema +type Access struct { + Type *string `json:",omitempty"` + ConnectionId *string `json:",omitempty"` +} + +// Aws is autogenerated from the json schema +type Aws struct { + RoleArn *string `json:",omitempty"` +} diff --git a/cfn-resources/stream-connection/cmd/resource/resource.go b/cfn-resources/stream-connection/cmd/resource/resource.go index e72b42332..50ac22f8a 100644 --- a/cfn-resources/stream-connection/cmd/resource/resource.go +++ b/cfn-resources/stream-connection/cmd/resource/resource.go @@ -19,9 +19,10 @@ import ( "fmt" "net/http" - admin20231115014 "go.mongodb.org/atlas-sdk/v20231115014/admin" + "go.mongodb.org/atlas-sdk/v20250312012/admin" "github.com/aws-cloudformation/cloudformation-cli-go-plugin/cfn/handler" + "github.com/aws/aws-sdk-go-v2/service/cloudformation/types" "github.com/mongodb/mongodbatlas-cloudformation-resources/util" "github.com/mongodb/mongodbatlas-cloudformation-resources/util/constants" @@ -32,15 +33,40 @@ 
import ( const ( ClusterConnectionType = "Cluster" KafkaConnectionType = "Kafka" + AWSLambdaType = "AWSLambda" + HTTPSType = "Https" ) -var CreateRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName, constants.Type} -var ReadRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName} -var UpdateRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName, constants.Type} -var DeleteRequiredFields = []string{constants.ProjectID, constants.InstanceName, constants.ConnectionName} -var ListRequiredFields = []string{constants.ProjectID, constants.InstanceName} +var CreateRequiredFields = []string{constants.ProjectID, constants.ConnectionName, constants.Type} +var ReadRequiredFields = []string{constants.ProjectID, constants.ConnectionName} +var UpdateRequiredFields = []string{constants.ProjectID, constants.ConnectionName, constants.Type} +var DeleteRequiredFields = []string{constants.ProjectID, constants.ConnectionName} +var ListRequiredFields = []string{constants.ProjectID} + +func normalizeWorkspaceName(model *Model) *handler.ProgressEvent { + var workspaceOrInstanceName *string + + // Validate that at least one of WorkspaceName or InstanceName is provided + switch { + case model.WorkspaceName != nil && *model.WorkspaceName != "": + workspaceOrInstanceName = model.WorkspaceName + case model.InstanceName != nil && *model.InstanceName != "": + workspaceOrInstanceName = model.InstanceName + default: + return &handler.ProgressEvent{ + OperationStatus: handler.Failed, + Message: "Either WorkspaceName or InstanceName must be provided", + HandlerErrorCode: string(types.HandlerErrorCodeInvalidRequest), + } + } + + // Ensure both WorkspaceName and InstanceName are set for backward compatibility + model.WorkspaceName = workspaceOrInstanceName + model.InstanceName = workspaceOrInstanceName + return nil +} -func initEnvWithLatestClient(req handler.Request, 
currentModel *Model, requiredFields []string) (*admin20231115014.APIClient, *handler.ProgressEvent) { +var InitEnvWithLatestClient = func(req handler.Request, currentModel *Model, requiredFields []string) (*admin.APIClient, *handler.ProgressEvent) { util.SetupLogger("mongodb-atlas-stream-connection") util.SetDefaultProfileIfNotDefined(¤tModel.Profile) @@ -49,26 +75,31 @@ func initEnvWithLatestClient(req handler.Request, currentModel *Model, requiredF return nil, errEvent } + if peErr := normalizeWorkspaceName(currentModel); peErr != nil { + return nil, peErr + } + client, peErr := util.NewAtlasClient(&req, currentModel.Profile) if peErr != nil { return nil, peErr } - return client.Atlas20231115014, nil + return client.AtlasSDK, nil } func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, CreateRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, CreateRequiredFields) if peErr != nil { return *peErr, nil } + workspaceOrInstanceName := currentModel.WorkspaceName + ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName streamConnectionReq := newStreamConnectionReq(currentModel) - streamConnResp, apiResp, err := conn.StreamsApi.CreateStreamConnection(ctx, *projectID, *instanceName, streamConnectionReq).Execute() + streamConnResp, apiResp, err := conn.StreamsApi.CreateStreamConnection(ctx, *projectID, *workspaceOrInstanceName, streamConnectionReq).Execute() if err != nil { return handleError(apiResp, constants.CREATE, err) } @@ -83,15 +114,16 @@ func Create(req handler.Request, prevModel *Model, currentModel *Model) (handler } func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, ReadRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, ReadRequiredFields) if 
peErr != nil { return *peErr, nil } + workspaceOrInstanceName := currentModel.WorkspaceName + projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName - streamConnResp, apiResp, err := conn.StreamsApi.GetStreamConnection(context.Background(), *projectID, *instanceName, *connectionName).Execute() + streamConnResp, apiResp, err := conn.StreamsApi.GetStreamConnection(context.Background(), *projectID, *workspaceOrInstanceName, *connectionName).Execute() if err != nil { return handleError(apiResp, constants.READ, err) } @@ -105,18 +137,19 @@ func Read(req handler.Request, prevModel *Model, currentModel *Model) (handler.P } func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, UpdateRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, UpdateRequiredFields) if peErr != nil { return *peErr, nil } + workspaceOrInstanceName := currentModel.WorkspaceName + ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName streamConnectionReq := newStreamConnectionReq(currentModel) - streamConnResp, apiResp, err := conn.StreamsApi.UpdateStreamConnection(ctx, *projectID, *instanceName, *connectionName, streamConnectionReq).Execute() + streamConnResp, apiResp, err := conn.StreamsApi.UpdateStreamConnection(ctx, *projectID, *workspaceOrInstanceName, *connectionName, streamConnectionReq).Execute() if err != nil { return handleError(apiResp, constants.UPDATE, err) } @@ -131,17 +164,19 @@ func Update(req handler.Request, prevModel *Model, currentModel *Model) (handler } func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, DeleteRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, 
currentModel, DeleteRequiredFields) if peErr != nil { return *peErr, nil } + workspaceOrInstanceName := currentModel.WorkspaceName + ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName connectionName := currentModel.ConnectionName - if _, apiResp, err := conn.StreamsApi.DeleteStreamConnection(ctx, *projectID, *instanceName, *connectionName).Execute(); err != nil { + apiResp, err := conn.StreamsApi.DeleteStreamConnection(ctx, *projectID, *workspaceOrInstanceName, *connectionName).Execute() + if err != nil { return handleError(apiResp, constants.DELETE, err) } @@ -152,17 +187,18 @@ func Delete(req handler.Request, prevModel *Model, currentModel *Model) (handler } func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.ProgressEvent, error) { - conn, peErr := initEnvWithLatestClient(req, currentModel, ListRequiredFields) + conn, peErr := InitEnvWithLatestClient(req, currentModel, ListRequiredFields) if peErr != nil { return *peErr, nil } + workspaceOrInstanceName := currentModel.WorkspaceName + ctx := context.Background() projectID := currentModel.ProjectId - instanceName := currentModel.InstanceName - accumulatedStreamConns, apiResp, err := getAllStreamConnections(ctx, conn, *projectID, *instanceName) + accumulatedStreamConns, apiResp, err := getAllStreamConnections(ctx, conn, *projectID, *workspaceOrInstanceName) if err != nil { return handleError(apiResp, constants.LIST, err) } @@ -171,7 +207,9 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P for i := range accumulatedStreamConns { model := GetStreamConnectionModel(&accumulatedStreamConns[i], nil) model.ProjectId = currentModel.ProjectId - model.InstanceName = currentModel.InstanceName + // InstanceName is deprecated but we maintain it for backward compatibility + model.WorkspaceName = workspaceOrInstanceName + model.InstanceName = workspaceOrInstanceName model.Profile = currentModel.Profile response = 
append(response, model) @@ -183,14 +221,14 @@ func List(req handler.Request, prevModel *Model, currentModel *Model) (handler.P }, nil } -func getAllStreamConnections(ctx context.Context, conn *admin20231115014.APIClient, projectID, instanceName string) ([]admin20231115014.StreamsConnection, *http.Response, error) { +func getAllStreamConnections(ctx context.Context, conn *admin.APIClient, projectID, workspaceOrInstanceName string) ([]admin.StreamsConnection, *http.Response, error) { pageNum := 1 - accumulatedStreamConns := make([]admin20231115014.StreamsConnection, 0) + accumulatedStreamConns := make([]admin.StreamsConnection, 0) for allRecordsRetrieved := false; !allRecordsRetrieved; { - streamConns, apiResp, err := conn.StreamsApi.ListStreamConnectionsWithParams(ctx, &admin20231115014.ListStreamConnectionsApiParams{ + streamConns, apiResp, err := conn.StreamsApi.ListStreamConnectionsWithParams(ctx, &admin.ListStreamConnectionsApiParams{ GroupId: projectID, - TenantName: instanceName, + TenantName: workspaceOrInstanceName, ItemsPerPage: util.Pointer(constants.DefaultListItemsPerPage), PageNum: util.Pointer(pageNum), }).Execute() diff --git a/cfn-resources/stream-connection/docs/README.md b/cfn-resources/stream-connection/docs/README.md index 4ea42432d..bb4308036 100644 --- a/cfn-resources/stream-connection/docs/README.md +++ b/cfn-resources/stream-connection/docs/README.md @@ -1,6 +1,6 @@ # MongoDB::Atlas::StreamConnection -Returns, adds, edits, and removes one connection for a stream instance in the specified project. To use this resource, the requesting API Key must have the Project Owner roles. +Returns, adds, edits, and removes one connection for a stream workspace in the specified project. To use this resource, the requesting API Key must have the Project Owner roles. 
## Syntax @@ -16,13 +16,19 @@ To declare this entity in your AWS CloudFormation template, use the following sy "Profile" : String, "ConnectionName" : String, "InstanceName" : String, + "WorkspaceName" : String, "Type" : String, "ClusterName" : String, + "ClusterProjectId" : String, "DbRoleToExecute" : DBRoleToExecute, "Authentication" : StreamsKafkaAuthentication, "BootstrapServers" : String, "Security" : StreamsKafkaSecurity, - "Config" : Config + "Config" : Config, + "Networking" : Networking, + "Aws" : Aws, + "Url" : String, + "Headers" : Headers } } @@ -36,13 +42,19 @@ Properties: Profile: String ConnectionName: String InstanceName: String + WorkspaceName: String Type: String ClusterName: String + ClusterProjectId: String DbRoleToExecute: DBRoleToExecute Authentication: StreamsKafkaAuthentication BootstrapServers: String Security: StreamsKafkaSecurity Config: Config + Networking: Networking + Aws: Aws + Url: String + Headers: Headers ## Properties @@ -87,9 +99,19 @@ _Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/l #### InstanceName -Human-readable label that identifies the stream instance. +Human-readable label that identifies the stream instance. Deprecated: Use WorkspaceName instead. -_Required_: Yes +_Required_: No + +_Type_: String + +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) + +#### WorkspaceName + +Human-readable label that identifies the stream workspace. + +_Required_: No _Type_: String @@ -97,15 +119,15 @@ _Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/l #### Type -Type of the connection. Can be either Cluster, Kafka, or Sample. +Type of the connection. Can be Cluster, Kafka, Sample, AWSLambda, or Https. 
_Required_: Yes _Type_: String -_Allowed Values_: Kafka | Cluster | Sample +_Allowed Values_: Kafka | Cluster | Sample | AWSLambda | Https -_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +_Update requires_: [Replacement](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-replacement) #### ClusterName @@ -117,6 +139,22 @@ _Type_: String _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### ClusterProjectId + +Unique 24-hexadecimal digit string that identifies the project containing the cluster for cross-project cluster connections. + +_Required_: No + +_Type_: String + +_Minimum Length_: 24 + +_Maximum Length_: 24 + +_Pattern_: ^([a-f0-9]{24})$ + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + #### DbRoleToExecute The name of a Built in or Custom DB Role to connect to an Atlas Cluster. @@ -167,3 +205,43 @@ _Type_: Config _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### Networking + +Networking configuration for AWS PrivateLink connections. + +_Required_: No + +_Type_: Networking + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Aws + +AWS Lambda connection configuration. 
+ +_Required_: No + +_Type_: Aws + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Url + +URL endpoint for HTTPS type connections. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Headers + +HTTP headers for HTTPS type connections. + +_Required_: No + +_Type_: Headers + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/aws.md b/cfn-resources/stream-connection/docs/aws.md new file mode 100644 index 000000000..66007a6b8 --- /dev/null +++ b/cfn-resources/stream-connection/docs/aws.md @@ -0,0 +1,34 @@ +# MongoDB::Atlas::StreamConnection Aws + +AWS Lambda connection configuration. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "RoleArn" : String
+}
+
+ +### YAML + +
+RoleArn: String
+
+ +## Properties + +#### RoleArn + +Amazon Resource Name (ARN) of the IAM role for AWS Lambda connection. + +_Required_: Yes + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/headers.md b/cfn-resources/stream-connection/docs/headers.md new file mode 100644 index 000000000..24328b6bb --- /dev/null +++ b/cfn-resources/stream-connection/docs/headers.md @@ -0,0 +1,32 @@ +# MongoDB::Atlas::StreamConnection Headers + +HTTP headers for HTTPS type connections. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "^[A-Za-z0-9-]+$" : String
+}
+
+ +### YAML + +
+^[A-Za-z0-9-]+$: String
+
+ +## Properties + +#### \^[A-Za-z0-9-]+$ + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/networking.md b/cfn-resources/stream-connection/docs/networking.md new file mode 100644 index 000000000..2be60063a --- /dev/null +++ b/cfn-resources/stream-connection/docs/networking.md @@ -0,0 +1,32 @@ +# MongoDB::Atlas::StreamConnection Networking + +Networking configuration for AWS PrivateLink connections. + +## Syntax + +To declare this entity in your AWS CloudFormation template, use the following syntax: + +### JSON + +
+{
+    "Access" : Networking
+}
+
+ +### YAML + +
+Access: Networking
+
+ +## Properties + +#### Access + +_Required_: Yes + +_Type_: Networking + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/docs/streamskafkaauthentication.md b/cfn-resources/stream-connection/docs/streamskafkaauthentication.md index 960abc4f9..2c4b6ce62 100644 --- a/cfn-resources/stream-connection/docs/streamskafkaauthentication.md +++ b/cfn-resources/stream-connection/docs/streamskafkaauthentication.md @@ -11,8 +11,14 @@ To declare this entity in your AWS CloudFormation template, use the following sy
 {
     "Mechanism" : String,
+    "Method" : String,
     "Username" : String,
-    "Password" : String
+    "Password" : String,
+    "TokenEndpointUrl" : String,
+    "ClientId" : String,
+    "ClientSecret" : String,
+    "Scope" : String,
+    "SaslOauthbearerExtensions" : String
 }
 
@@ -20,15 +26,31 @@ To declare this entity in your AWS CloudFormation template, use the following sy
 Mechanism: String
+Method: String
 Username: String
 Password: String
+TokenEndpointUrl: String
+ClientId: String
+ClientSecret: String
+Scope: String
+SaslOauthbearerExtensions: String
 
## Properties #### Mechanism -Style of authentication. Can be one of PLAIN, SCRAM-256, or SCRAM-512. +Style of authentication. Can be one of PLAIN, SCRAM-256, SCRAM-512, or OAUTHBEARER. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Method + +OAuth authentication method. _Required_: No @@ -56,3 +78,53 @@ _Type_: String _Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) +#### TokenEndpointUrl + +OAuth token endpoint URL. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ClientId + +OAuth client ID. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### ClientSecret + +OAuth client secret. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### Scope + +OAuth scope. + +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + +#### SaslOauthbearerExtensions + +SASL OAuth bearer extensions. 
+ +_Required_: No + +_Type_: String + +_Update requires_: [No interruption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/using-cfn-updating-stacks-update-behaviors.html#update-no-interrupt) + diff --git a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json index c6a724f64..defc2228d 100644 --- a/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json +++ b/cfn-resources/stream-connection/mongodb-atlas-streamconnection.json @@ -1,7 +1,7 @@ { "typeName": "MongoDB::Atlas::StreamConnection", "additionalProperties": false, - "description": "Returns, adds, edits, and removes one connection for a stream instance in the specified project. To use this resource, the requesting API Key must have the Project Owner roles.", + "description": "Returns, adds, edits, and removes one connection for a stream workspace in the specified project. To use this resource, the requesting API Key must have the Project Owner roles.", "sourceUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources", "documentationUrl": "https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/cfn-resources/stream-connection/README.md", "definitions": { @@ -30,7 +30,11 @@ "properties": { "Mechanism": { "type": "string", - "description": "Style of authentication. Can be one of PLAIN, SCRAM-256, or SCRAM-512." + "description": "Style of authentication. Can be one of PLAIN, SCRAM-256, SCRAM-512, or OAUTHBEARER." + }, + "Method": { + "type": "string", + "description": "OAuth authentication method." }, "Username": { "type": "string", @@ -40,6 +44,27 @@ "type": "string", "format": "password", "description": "Password of the account to connect to the Kafka cluster. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials." 
+ }, + "TokenEndpointUrl": { + "type": "string", + "description": "OAuth token endpoint URL." + }, + "ClientId": { + "type": "string", + "description": "OAuth client ID." + }, + "ClientSecret": { + "type": "string", + "format": "password", + "description": "OAuth client secret. Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials." + }, + "Scope": { + "type": "string", + "description": "OAuth scope." + }, + "SaslOauthbearerExtensions": { + "type": "string", + "description": "SASL OAuth bearer extensions." } }, "additionalProperties": false @@ -68,6 +93,52 @@ } }, "additionalProperties": false + }, + "Networking": { + "type": "object", + "description": "Networking configuration for AWS PrivateLink connections.", + "properties": { + "Access": { + "type": "object", + "description": "Network access configuration.", + "properties": { + "Type": { + "type": "string", + "description": "Type of network access. PRIVATE_ENDPOINT for AWS PrivateLink.", + "enum": [ + "PRIVATE_ENDPOINT", + "PUBLIC" + ] + }, + "ConnectionId": { + "type": "string", + "description": "Unique identifier of the AWS PrivateLink connection." + } + }, + "required": [ + "Type" + ], + "additionalProperties": false + } + }, + "required": [ + "Access" + ], + "additionalProperties": false + }, + "Aws": { + "type": "object", + "description": "AWS Lambda connection configuration.", + "properties": { + "RoleArn": { + "type": "string", + "description": "Amazon Resource Name (ARN) of the IAM role for AWS Lambda connection." + } + }, + "required": [ + "RoleArn" + ], + "additionalProperties": false } }, "properties": { @@ -89,21 +160,34 @@ }, "InstanceName": { "type": "string", - "description": "Human-readable label that identifies the stream instance." + "description": "Human-readable label that identifies the stream instance. 
WARNING: This field is deprecated and will be removed in the next major release. Please use WorkspaceName instead." + }, + "WorkspaceName": { + "type": "string", + "description": "Human-readable label that identifies the stream workspace." }, "Type": { "type": "string", - "description": "Type of the connection. Can be either Cluster, Kafka, or Sample.", + "description": "Type of the connection. Can be Cluster, Kafka, Sample, AWSLambda, or Https.", "enum": [ "Kafka", "Cluster", - "Sample" + "Sample", + "AWSLambda", + "Https" ] }, "ClusterName": { "type": "string", "description": "Name of the cluster configured for this connection." }, + "ClusterProjectId": { + "type": "string", + "description": "Unique 24-hexadecimal digit string that identifies the project containing the cluster for cross-project cluster connections.", + "maxLength": 24, + "minLength": 24, + "pattern": "^([a-f0-9]{24})$" + }, "DbRoleToExecute": { "$ref": "#/definitions/DBRoleToExecute" }, @@ -119,6 +203,26 @@ }, "Config": { "$ref": "#/definitions/Config" + }, + "Networking": { + "$ref": "#/definitions/Networking" + }, + "Aws": { + "$ref": "#/definitions/Aws" + }, + "Url": { + "type": "string", + "description": "URL endpoint for HTTPS type connections." 
+ }, + "Headers": { + "type": "object", + "description": "HTTP headers for HTTPS type connections.", + "patternProperties": { + "^[A-Za-z0-9-]+$": { + "type": "string" + } + }, + "additionalProperties": false } }, "handlers": { @@ -151,23 +255,25 @@ "primaryIdentifier": [ "/properties/ProjectId", "/properties/ConnectionName", - "/properties/InstanceName", + "/properties/WorkspaceName", "/properties/Profile" ], "required": [ "ProjectId", "ConnectionName", - "InstanceName", "Type" ], "createOnlyProperties": [ "/properties/ProjectId", "/properties/InstanceName", + "/properties/WorkspaceName", "/properties/ConnectionName", + "/properties/Type", "/properties/Profile" ], "writeOnlyProperties": [ - "/properties/Authentication/Password" + "/properties/Authentication/Password", + "/properties/Authentication/ClientSecret" ], "tagging": { "taggable": false diff --git a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh index c64cf16aa..926d49033 100755 --- a/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-create-inputs.sh @@ -18,73 +18,182 @@ fi projectName="${1:-$PROJECT_NAME}" echo "$projectName" -projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | select(.name==$NAME) | .id') -if [ -z "$projectId" ]; then - projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') - echo -e "Created project \"${projectName}\" with id: ${projectId}\n" +# Use existing project ID if set, otherwise try to find or create project +if [ -n "${MONGODB_ATLAS_PROJECT_ID:-}" ]; then + projectId="${MONGODB_ATLAS_PROJECT_ID}" + echo -e "Using existing project ID from MONGODB_ATLAS_PROJECT_ID: ${projectId}\n" else - echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" + projectId=$(atlas projects list --output json | jq --arg NAME "${projectName}" -r '.results[] | 
select(.name==$NAME) | .id') + if [ -z "$projectId" ]; then + projectId=$(atlas projects create "${projectName}" --output=json | jq -r '.id') + echo -e "Created project \"${projectName}\" with id: ${projectId}\n" + else + echo -e "FOUND project \"${projectName}\" with id: ${projectId}\n" + fi fi echo -e "=====\nrun this command to clean up\n=====\nmongocli iam projects delete ${projectId} --force\n=====" -instanceName="stream-instance-$(date +%s)-$RANDOM" +workspaceName="stream-workspace-$(date +%s)-$RANDOM" cloudProvider="AWS" clusterName="cluster-$(date +%s)-$RANDOM" -atlas streams instances create "${instanceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} -echo -e "Created StreamInstance \"${instanceName}\"" + +atlas streams instances create "${workspaceName}" --projectId "${projectId}" --region VIRGINIA_USA --provider ${cloudProvider} +echo -e "Created StreamWorkspace \"${workspaceName}\"" atlas clusters create "${clusterName}" --projectId "${projectId}" --backup --provider AWS --region US_EAST_1 --members 3 --tier M10 --diskSizeGB 10 --output=json atlas clusters watch "${clusterName}" --projectId "${projectId}" echo -e "Created Cluster \"${clusterName}\"" +# AWS IAM role creation and authorization for Lambda connections +echo "--------------------------------AWS Lambda IAM Role creation starts ----------------------------" + +# Single IAM role for both CREATE and UPDATE scenarios (following Terraform pattern) +iamRoleName="mongodb-atlas-streams-lambda-$(date +%s)-${RANDOM}" + +echo "Creating IAM role: ${iamRoleName}" + +# Create cloud provider access entry +roleId=$(atlas cloudProviders accessRoles aws create --projectId "${projectId}" --output json | jq -r '.roleId') +echo "Created Atlas cloud provider access entry: ${roleId}" + +# Get Atlas AWS Account ARN and External ID +atlasAWSAccountArn=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleId}" -r '.awsIamRoles[] | 
select(.roleId | test($roleID)) | .atlasAWSAccountArn') +atlasAssumedRoleExternalId=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg roleID "${roleId}" -r '.awsIamRoles[] | select(.roleId | test($roleID)) | .atlasAssumedRoleExternalId') + +# Create trust policy +jq --arg atlasAssumedRoleExternalId "$atlasAssumedRoleExternalId" \ + --arg atlasAWSAccountArn "$atlasAWSAccountArn" \ + '.Statement[0].Principal.AWS?|=$atlasAWSAccountArn | .Statement[0].Condition.StringEquals["sts:ExternalId"]?|=$atlasAssumedRoleExternalId' \ + "$(dirname "$0")/lambda-role-policy-template.json" >"$(dirname "$0")/lambda-trust-policy.json" + +echo "--------------------------------AWS IAM Role creation starts ----------------------------" + +# Check if role exists, delete if found +awsRoleId=$(aws iam get-role --role-name "${iamRoleName}" 2>/dev/null | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .RoleId' || echo "") +if [ -n "$awsRoleId" ]; then + aws iam delete-role --role-name "${iamRoleName}" + echo "Deleted existing role" +fi + +# Create IAM role +awsRoleId=$(aws iam create-role --role-name "${iamRoleName}" --assume-role-policy-document file://"$(dirname "$0")"/lambda-trust-policy.json | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .RoleId') +echo "Created AWS IAM role: ${awsRoleId}" + +# Get role ARN +awsArn=$(aws iam get-role --role-name "${iamRoleName}" | jq --arg roleName "${iamRoleName}" -r '.Role | select(.RoleName==$roleName) | .Arn') + +echo "--------------------------------AWS IAM Role creation ends ----------------------------" + +# Wait for AWS IAM role to propagate (similar to encryption-at-rest pattern) +echo "Waiting for IAM role to propagate..." 
+sleep 65 + +# Authorize the role in Atlas +echo "--------------------------------Authorize MongoDB Atlas Role starts ----------------------------" +atlas cloudProviders accessRoles aws authorize "${roleId}" --iamAssumedRoleArn "${awsArn}" --projectId "${projectId}" +echo "Authorized role: ${iamRoleName}" +echo "--------------------------------Authorize MongoDB Atlas Role ends ----------------------------" + jq --arg cluster_name "$clusterName" \ - --arg instance_name "$instanceName" \ + --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ClusterName?|=$cluster_name | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_1_create.json" >"inputs/inputs_1_create.json" jq --arg cluster_name "$clusterName" \ - --arg instance_name "$instanceName" \ + --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ClusterName?|=$cluster_name | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_1_update.json" >"inputs/inputs_1_update.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_2_create.json" >"inputs/inputs_2_create.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_2_update.json" >"inputs/inputs_2_update.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg 
project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_3_create.json" >"inputs/inputs_3_create.json" -jq --arg instance_name "$instanceName" \ +jq --arg workspace_name "$workspaceName" \ --arg project_id "$projectId" \ --arg profile "$profile" \ '.Profile?|=$profile | .ProjectId?|=$project_id - | .InstanceName?|=$instance_name' \ + | .WorkspaceName?|=$workspace_name' \ "$(dirname "$0")/inputs_3_update.json" >"inputs/inputs_3_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg role_arn "$awsArn" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .Aws.RoleArn=$role_arn' \ + "$(dirname "$0")/inputs_4_create.json" >"inputs/inputs_4_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + --arg role_arn "$awsArn" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name + | .Aws.RoleArn=$role_arn' \ + "$(dirname "$0")/inputs_4_update.json" >"inputs/inputs_4_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_5_create.json" >"inputs/inputs_5_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_5_update.json" >"inputs/inputs_5_update.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + 
"$(dirname "$0")/inputs_6_create.json" >"inputs/inputs_6_create.json" + +jq --arg workspace_name "$workspaceName" \ + --arg project_id "$projectId" \ + --arg profile "$profile" \ + '.Profile?|=$profile + | .ProjectId?|=$project_id + | .WorkspaceName?|=$workspace_name' \ + "$(dirname "$0")/inputs_6_update.json" >"inputs/inputs_6_update.json" diff --git a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh index 44f67754b..06a29af75 100755 --- a/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh +++ b/cfn-resources/stream-connection/test/cfn-test-delete-inputs.sh @@ -12,7 +12,20 @@ function usage { projectId=$(jq -r '.ProjectId' ./inputs/inputs_1_create.json) clusterName=$(jq -r '.ClusterName' ./inputs/inputs_1_create.json) -instanceName=$(jq -r '.InstanceName' ./inputs/inputs_1_create.json) + +# Get workspace name or instance name (workspace name takes precedence) +workspaceName=$(jq -r '.WorkspaceName // empty' ./inputs/inputs_1_create.json) +instanceName=$(jq -r '.InstanceName // empty' ./inputs/inputs_1_create.json) + +# Use WorkspaceName if available, otherwise fall back to InstanceName +if [ -n "${workspaceName}" ] && [ "${workspaceName}" != "null" ] && [ "${workspaceName}" != "" ]; then + workspaceOrInstanceName="${workspaceName}" +elif [ -n "${instanceName}" ] && [ "${instanceName}" != "null" ] && [ "${instanceName}" != "" ]; then + workspaceOrInstanceName="${instanceName}" +else + echo "Error: Neither WorkspaceName nor InstanceName found in inputs_1_create.json" + exit 1 +fi if atlas cluster delete "${clusterName}" --projectId "${projectId}" --force; then echo "deleting cluster with name ${clusterName}" @@ -25,12 +38,50 @@ if [ "$status" -eq 0 ]; then echo "Cluster '${clusterName}' has been successfully watched until deletion." 
fi -#delete stream instance -if atlas streams instances delete "${instanceName}" --projectId "${projectId}" --force; then - echo "deleting stream instance with name ${instanceName}" +#delete stream workspace/instance (using instances delete for backward compatibility) +if atlas streams instances delete "${workspaceOrInstanceName}" --projectId "${projectId}" --force; then + echo "deleting stream workspace/instance with name ${workspaceOrInstanceName}" +else + echo "failed to delete the stream workspace/instance with name ${workspaceOrInstanceName}" +fi + +# Delete AWS Lambda IAM role if it exists +echo "--------------------------------delete AWS Lambda IAM role starts ----------------------------" + +# Check if Lambda input files exist +if [ -f "./inputs/inputs_4_create.json" ]; then + echo "Found Lambda connection inputs, cleaning up IAM role..." + + # Extract role ARN from CREATE input file (same role used for both CREATE and UPDATE) + roleArn=$(jq -r '.Aws.RoleArn // empty' ./inputs/inputs_4_create.json) + # Extract role name from ARN (everything after the last '/') + iamRoleName=$(echo "${roleArn}" | awk -F'/' '{print $NF}') + + # Get external ID from trust policy file and find roleId in Atlas + if [ -f "$(dirname "$0")/lambda-trust-policy.json" ]; then + atlasAssumedRoleExternalId=$(jq -r '.Statement[0].Condition.StringEquals["sts:ExternalId"]' "$(dirname "$0")/lambda-trust-policy.json") + roleId=$(atlas cloudProviders accessRoles list --projectId "${projectId}" --output json | jq --arg extId "${atlasAssumedRoleExternalId}" -r '.awsIamRoles[] | select(.atlasAssumedRoleExternalId | test($extId)) | .roleId') + + if [ -n "${roleId}" ] && [ "${roleId}" != "null" ]; then + echo "Deauthorizing role from Atlas: ${roleId}" + atlas cloudProviders accessRoles aws deauthorize "${roleId}" --projectId "${projectId}" --force || echo "Failed to deauthorize role" + fi + fi + + # Delete IAM role + if [ -n "${iamRoleName}" ] && [ "${iamRoleName}" != "null" ] && [ 
"${iamRoleName}" != "" ]; then + echo "Deleting IAM role: ${iamRoleName}" + aws iam delete-role --role-name "${iamRoleName}" 2>/dev/null || echo "Role already deleted or doesn't exist" + fi + + # Clean up temporary file + rm -f "$(dirname "$0")/lambda-trust-policy.json" + + echo "Cleaned up Lambda IAM role and temporary files" else - echo "failed to delete the stream instance with name ${instanceName}" + echo "No Lambda connection inputs found, skipping IAM role cleanup" fi +echo "--------------------------------delete AWS Lambda IAM role ends ----------------------------" #delete project if atlas projects delete "$projectId" --force; then diff --git a/cfn-resources/stream-connection/test/inputs_1_create.json b/cfn-resources/stream-connection/test/inputs_1_create.json index cf5167243..11e56aae1 100644 --- a/cfn-resources/stream-connection/test/inputs_1_create.json +++ b/cfn-resources/stream-connection/test/inputs_1_create.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ClusterName": "", "ConnectionName": "ConnectionNameCluster", "Type": "Cluster", diff --git a/cfn-resources/stream-connection/test/inputs_1_update.json b/cfn-resources/stream-connection/test/inputs_1_update.json index c653114ae..285ca5635 100644 --- a/cfn-resources/stream-connection/test/inputs_1_update.json +++ b/cfn-resources/stream-connection/test/inputs_1_update.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ClusterName": "", "ConnectionName": "ConnectionNameCluster", "Type": "Cluster", diff --git a/cfn-resources/stream-connection/test/inputs_2_create.json b/cfn-resources/stream-connection/test/inputs_2_create.json index c454e2279..f99459cbb 100644 --- a/cfn-resources/stream-connection/test/inputs_2_create.json +++ b/cfn-resources/stream-connection/test/inputs_2_create.json @@ -1,14 +1,14 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", 
"ConnectionName": "ConnectionNameKafka", "Type": "Kafka", "BootstrapServers": "localhost:9092,localhost:9092", "Authentication": { "Mechanism": "PLAIN", "Username": "user1", - "Password": "rawpassword" + "Password": "" }, "Security": { "Protocol": "PLAINTEXT" diff --git a/cfn-resources/stream-connection/test/inputs_2_update.json b/cfn-resources/stream-connection/test/inputs_2_update.json index 19076817f..c76dea7f4 100644 --- a/cfn-resources/stream-connection/test/inputs_2_update.json +++ b/cfn-resources/stream-connection/test/inputs_2_update.json @@ -1,14 +1,14 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "ConnectionNameKafka", "Type": "Kafka", "BootstrapServers": "localhost:9092,localhost:9092", "Authentication": { "Mechanism": "PLAIN", "Username": "user1", - "Password": "rawpassword" + "Password": "" }, "Security": { "Protocol": "SSL", diff --git a/cfn-resources/stream-connection/test/inputs_3_create.json b/cfn-resources/stream-connection/test/inputs_3_create.json index 561899222..0ba44a39b 100644 --- a/cfn-resources/stream-connection/test/inputs_3_create.json +++ b/cfn-resources/stream-connection/test/inputs_3_create.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "sample_stream_solar", "Type": "Sample" } diff --git a/cfn-resources/stream-connection/test/inputs_3_update.json b/cfn-resources/stream-connection/test/inputs_3_update.json index 561899222..0ba44a39b 100644 --- a/cfn-resources/stream-connection/test/inputs_3_update.json +++ b/cfn-resources/stream-connection/test/inputs_3_update.json @@ -1,7 +1,7 @@ { "Profile": "default", "ProjectId": "", - "InstanceName": "", + "WorkspaceName": "", "ConnectionName": "sample_stream_solar", "Type": "Sample" } diff --git a/cfn-resources/stream-connection/test/inputs_4_create.json b/cfn-resources/stream-connection/test/inputs_4_create.json new file mode 100644 index 
000000000..b99f6f9f6 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_4_create.json @@ -0,0 +1,11 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameAWSLambda", + "Type": "AWSLambda", + "Aws": { + "RoleArn": "" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_4_update.json b/cfn-resources/stream-connection/test/inputs_4_update.json new file mode 100644 index 000000000..b99f6f9f6 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_4_update.json @@ -0,0 +1,11 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameAWSLambda", + "Type": "AWSLambda", + "Aws": { + "RoleArn": "" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_5_create.json b/cfn-resources/stream-connection/test/inputs_5_create.json new file mode 100644 index 000000000..00c6b7be4 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_5_create.json @@ -0,0 +1,13 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameHttps", + "Type": "Https", + "Url": "https://api.example.com/webhook", + "Headers": { + "Content-Type": "application/json", + "Authorization": "Bearer token123" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_5_update.json b/cfn-resources/stream-connection/test/inputs_5_update.json new file mode 100644 index 000000000..fe4ed45f8 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_5_update.json @@ -0,0 +1,14 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameHttps", + "Type": "Https", + "Url": "https://api.example.com/webhook/v2", + "Headers": { + "Content-Type": "application/json", + "Authorization": "Bearer updated-token-456", + "X-API-Key": "new-api-key" + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_6_create.json 
b/cfn-resources/stream-connection/test/inputs_6_create.json new file mode 100644 index 000000000..f402e8b6a --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_6_create.json @@ -0,0 +1,30 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameKafkaOAuth", + "Type": "Kafka", + "BootstrapServers": "pkc-example.us-east-1.aws.confluent.cloud:9092", + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": "https://oauth.example.com/oauth2/token", + "ClientId": "test-client-id-12345", + "ClientSecret": "", + "Scope": "read:messages write:messages", + "SaslOauthbearerExtensions": "logicalCluster=lkc-test,identityPoolId=pool-test" + }, + "Security": { + "Protocol": "SASL_SSL", + "BrokerPublicCertificate": "-----BEGIN CERTIFICATE-----\nMIIENTCCAx2gAwIBAgIJAPWNjXbYMr7lMA0GCSqGSIb3DQEBCwUAMGwxCzAJBgNV\nBAYTAklFMQowCAYDVQQIDAFEMQ8wDQYDVQQHDAZEdWJsaW4xFTATBgNVBAoMDERP\nIE5PVCBUUlVTVDEVMBMGA1UECwwMRE8gTk9UIFRSVVNUMRIwEAYDVQQDDAlsb2Nh\nbGhvc3QwHhcNMjIwNDE5MTYxNDI5WhcNMjMwOTAxMTYxNDI5WjBsMQswCQYDVQQG\nEwJJRTEKMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBO\nT1QgVFJVU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxo\nb3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6tWJkTr99TuxWN2\nih7uXVIbjRCd1pLTvmoZxHee4TYbs7zwHCzanbTeqQ2LOZlrqHLwmJ9E+xrkDSsB\nmlDfI3J9f5dIBeEZAZDP9GcZ64KCLq4PgdQV0YLPiuwYyEuIPZrDkNY7weVqBpk9\noEf4HLktxHx+zbsp6/SxAMKCYBTcy8wioccdLI8lBLJeVOl/KsuxfkGILoH+ryl5\nqBdYGeZzGnOjU4cJVFOCvJ7zJDn2ASGghO7JbmKPotr/NeY0MXEKJR4zHIHyYvRh\nKit5V5bq3DJw5kp0TFkVpjhRaMaLkaP8w97bEvaOthV5fJB94WG44eEuYhuO/xyY\nh2SLEwIDAQABo4HZMIHWMIGGBgNVHSMEfzB9oXCkbjBsMQswCQYDVQQGEwJJRTEK\nMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBOT1QgVFJV\nU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxob3N0ggkA\n2D7GIAQ8CcgwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwEGCCsGAQUFBwMCMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0B\nAQsFAAOCAQEAgKINT8ASLnG/k/+H68iqoPfb49me
lXKtRiVG5jYlCN8P7v3Yj/AT\nm3Wbq/cGayd2sewh4UgvkmUWEuw6OCBsORT/E9+teq7G/XbWK6YGpc7WCzJT0kJD\n8sOK2LuRegPM7gEoIZ5KBycVBxB3mLkIyiOeFpCK+ZoW8gd9Ug2ZNK4YAyMDFfW9\nyJ7hJThLZmckaMZBY83yrSD3BTevLN22cWphj9Sna7BW+7c5Pqw3W9i4YO4wSmwU\nJ1FPS2VF0Pz5ORDNp5fgz2JVS4b3k2IQ0dEIXQW3OeBO1i7p+frUOroQFu8ZXLac\nromOggcaq3uWOek9yP+3XusUjXWJ3ZPPsA==\n-----END CERTIFICATE-----" + }, + "Config": { + "auto.offset.reset": "earliest" + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } +} + diff --git a/cfn-resources/stream-connection/test/inputs_6_update.json b/cfn-resources/stream-connection/test/inputs_6_update.json new file mode 100644 index 000000000..faae07728 --- /dev/null +++ b/cfn-resources/stream-connection/test/inputs_6_update.json @@ -0,0 +1,30 @@ +{ + "Profile": "default", + "ProjectId": "", + "WorkspaceName": "", + "ConnectionName": "ConnectionNameKafkaOAuth", + "Type": "Kafka", + "BootstrapServers": "pkc-example.us-east-1.aws.confluent.cloud:9092", + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": "https://oauth.example.com/oauth2/token", + "ClientId": "test-client-id-updated", + "ClientSecret": "", + "Scope": "read:messages write:messages delete:messages", + "SaslOauthbearerExtensions": "logicalCluster=lkc-updated,identityPoolId=pool-updated" + }, + "Security": { + "Protocol": "SASL_SSL", + "BrokerPublicCertificate": "-----BEGIN 
CERTIFICATE-----\nMIIENTCCAx2gAwIBAgIJAPWNjXbYMr7lMA0GCSqGSIb3DQEBCwUAMGwxCzAJBgNV\nBAYTAklFMQowCAYDVQQIDAFEMQ8wDQYDVQQHDAZEdWJsaW4xFTATBgNVBAoMDERP\nIE5PVCBUUlVTVDEVMBMGA1UECwwMRE8gTk9UIFRSVVNUMRIwEAYDVQQDDAlsb2Nh\nbGhvc3QwHhcNMjIwNDE5MTYxNDI5WhcNMjMwOTAxMTYxNDI5WjBsMQswCQYDVQQG\nEwJJRTEKMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBO\nT1QgVFJVU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxo\nb3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv6tWJkTr99TuxWN2\nih7uXVIbjRCd1pLTvmoZxHee4TYbs7zwHCzanbTeqQ2LOZlrqHLwmJ9E+xrkDSsB\nmlDfI3J9f5dIBeEZAZDP9GcZ64KCLq4PgdQV0YLPiuwYyEuIPZrDkNY7weVqBpk9\noEf4HLktxHx+zbsp6/SxAMKCYBTcy8wioccdLI8lBLJeVOl/KsuxfkGILoH+ryl5\nqBdYGeZzGnOjU4cJVFOCvJ7zJDn2ASGghO7JbmKPotr/NeY0MXEKJR4zHIHyYvRh\nKit5V5bq3DJw5kp0TFkVpjhRaMaLkaP8w97bEvaOthV5fJB94WG44eEuYhuO/xyY\nh2SLEwIDAQABo4HZMIHWMIGGBgNVHSMEfzB9oXCkbjBsMQswCQYDVQQGEwJJRTEK\nMAgGA1UECAwBRDEPMA0GA1UEBwwGRHVibGluMRUwEwYDVQQKDAxETyBOT1QgVFJV\nU1QxFTATBgNVBAsMDERPIE5PVCBUUlVTVDESMBAGA1UEAwwJbG9jYWxob3N0ggkA\n2D7GIAQ8CcgwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB\nBQUHAwEGCCsGAQUFBwMCMBQGA1UdEQQNMAuCCWxvY2FsaG9zdDANBgkqhkiG9w0B\nAQsFAAOCAQEAgKINT8ASLnG/k/+H68iqoPfb49melXKtRiVG5jYlCN8P7v3Yj/AT\nm3Wbq/cGayd2sewh4UgvkmUWEuw6OCBsORT/E9+teq7G/XbWK6YGpc7WCzJT0kJD\n8sOK2LuRegPM7gEoIZ5KBycVBxB3mLkIyiOeFpCK+ZoW8gd9Ug2ZNK4YAyMDFfW9\nyJ7hJThLZmckaMZBY83yrSD3BTevLN22cWphj9Sna7BW+7c5Pqw3W9i4YO4wSmwU\nJ1FPS2VF0Pz5ORDNp5fgz2JVS4b3k2IQ0dEIXQW3OeBO1i7p+frUOroQFu8ZXLac\nromOggcaq3uWOek9yP+3XusUjXWJ3ZPPsA==\n-----END CERTIFICATE-----" + }, + "Config": { + "auto.offset.reset": "latest" + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } +} + diff --git a/cfn-resources/stream-connection/test/lambda-permissions-template.json b/cfn-resources/stream-connection/test/lambda-permissions-template.json new file mode 100644 index 000000000..fdeef7e80 --- /dev/null +++ b/cfn-resources/stream-connection/test/lambda-permissions-template.json @@ -0,0 +1,14 @@ +{ + "Version": "2012-10-17", + 
 "Statement": [ + { + "Effect": "Allow", + "Action": [ + "lambda:InvokeFunction", + "lambda:InvokeAsync" + ], + "Resource": "*" + } + ] +} + diff --git a/cfn-resources/stream-connection/test/lambda-role-policy-template.json b/cfn-resources/stream-connection/test/lambda-role-policy-template.json new file mode 100644 index 000000000..e8209f2b6 --- /dev/null +++ b/cfn-resources/stream-connection/test/lambda-role-policy-template.json @@ -0,0 +1,18 @@ +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "" + }, + "Action": "sts:AssumeRole", + "Condition": { + "StringEquals": { + "sts:ExternalId": "" + } + } + } + ] +} + diff --git a/examples/atlas-streams/stream-connection/README.md b/examples/atlas-streams/stream-connection/README.md new file mode 100644 index 000000000..c06455f46 --- /dev/null +++ b/examples/atlas-streams/stream-connection/README.md @@ -0,0 +1,39 @@ +# How to create a MongoDB::Atlas::StreamConnection + +## Step 1: Activate the stream connection resource in CloudFormation + Step a: Create Role using [execution-role.yaml](https://github.com/mongodb/mongodbatlas-cloudformation-resources/blob/master/examples/execution-role.yaml) in CFN resources folder. + + Step b: Search for MongoDB::Atlas::StreamConnection resource. + + (CloudFormation > Public extensions > choose 'Third party' > Search with " Execution name prefix = MongoDB " ) + Step c: Select and activate + Enter the RoleArn that is created in Step a. + + Your StreamConnection Resource is ready to use. 
+ +## Step 2: Create template using example JSON files + Examples for each connection type: + + **Cluster type** - Connect to an Atlas cluster: + - [cluster-stream-connection.json](cluster-stream-connection.json) + + **Kafka type** - Connect to a Kafka cluster: + - [kafka-stream-connection.json](kafka-stream-connection.json) + - [kafka-oauth-stream-connection.json](kafka-oauth-stream-connection.json) + + **Sample type** - Use sample datasets: + - [sample-stream-connection.json](sample-stream-connection.json) + + **AWSLambda type** - Connect to AWS Lambda: + - [aws-lambda-stream-connection.json](aws-lambda-stream-connection.json) + + **Https type** - Connect via HTTPS: + - [https-stream-connection.json](https-stream-connection.json) + + Note: Make sure you are providing appropriate values for: + 1. ProjectId + 2. WorkspaceName (or InstanceName - deprecated) + 3. ConnectionName + 4. Type: Cluster, Kafka, Sample, AWSLambda, or Https + 5. Profile (optional) + 6. Type-specific fields (ClusterName for Cluster type, BootstrapServers for Kafka type, etc.) 
diff --git a/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json b/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json new file mode 100644 index 000000000..fd7f3ec67 --- /dev/null +++ b/examples/atlas-streams/stream-connection/aws-lambda-stream-connection.json @@ -0,0 +1,77 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'AWSLambda' for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "AWSLambdaConnection" + }, + "LambdaRoleArn": { + "Type": "String", + "Description": "Amazon Resource Name (ARN) of the IAM role that Stream Processing will assume to access AWS Lambda. 
The role must have permissions to invoke Lambda functions.", + "AllowedPattern": "^arn:aws:iam::[0-9]{12}:role/[a-zA-Z0-9+=,.@_-]+$" + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "AWSLambda", + "Aws": { + "RoleArn": { + "Ref": "LambdaRoleArn" + } + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "AWSLambda" + }, + "LambdaRoleArn": { + "Description": "IAM Role ARN used for Lambda connection", + "Value": { + "Ref": "LambdaRoleArn" + } + } + } +} + diff --git a/examples/atlas-streams/stream-connection/cluster-stream-connection.json b/examples/atlas-streams/stream-connection/cluster-stream-connection.json index da3b5eec8..2672c227d 100644 --- a/examples/atlas-streams/stream-connection/cluster-stream-connection.json +++ b/examples/atlas-streams/stream-connection/cluster-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - "InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -50,8 +50,8 @@ "ConnectionName": { "Ref": "ConnectionName" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "ClusterName": { "Ref": "ClusterName" diff --git a/examples/atlas-streams/stream-connection/https-stream-connection.json b/examples/atlas-streams/stream-connection/https-stream-connection.json new file mode 100644 index 000000000..b0485a7ac 
--- /dev/null +++ b/examples/atlas-streams/stream-connection/https-stream-connection.json @@ -0,0 +1,79 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'Https' for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "HttpsConnection" + }, + "HttpsUrl": { + "Type": "String", + "Description": "URL endpoint for the HTTPS connection. Must be a valid HTTPS URL.", + "AllowedPattern": "^https://.*" + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "Https", + "Url": { + "Ref": "HttpsUrl" + }, + "Headers": { + "Content-Type": "application/json", + "Authorization": "Bearer updated-token-67891", + "X-API-Key": "my-new-api-key-123456" + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "Https" + }, + "HttpsUrl": { + "Description": "HTTPS endpoint URL", + "Value": { + "Ref": "HttpsUrl" + } + } + } +} diff --git a/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json 
b/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json new file mode 100644 index 000000000..df9fb0919 --- /dev/null +++ b/examples/atlas-streams/stream-connection/kafka-oauth-stream-connection.json @@ -0,0 +1,164 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Description": "This template creates one connection of type 'Kafka' with OAuth authentication for a given stream instance in the specified project", + "Parameters": { + "Profile": { + "Type": "String", + "Default": "default", + "Description": "Secret Manager Profile that contains the Atlas Programmatic keys" + }, + "ProjectId": { + "Type": "String", + "Description": "Atlas Project Id", + "MinLength": 24, + "MaxLength": 24, + "AllowedPattern": "^([a-f0-9]{24})$" + }, + "WorkspaceName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream workspace" + }, + "ConnectionName": { + "Type": "String", + "Description": "Human-readable label that identifies the stream connection", + "Default": "KafkaOAuthConnection" + }, + "BootstrapServers": { + "Type": "String", + "Description": "Comma separated list of Kafka broker addresses (e.g., 'pkc-xxxxx.us-east-1.aws.confluent.cloud:9092')" + }, + "OAuthTokenEndpointUrl": { + "Type": "String", + "Description": "OAuth 2.0 token endpoint URL for authentication" + }, + "OAuthClientId": { + "Type": "String", + "Description": "OAuth 2.0 client identifier" + }, + "OAuthClientSecret": { + "Type": "String", + "Description": "OAuth 2.0 client secret. 
Review [AWS security best practices for CloudFormation](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/security-best-practices.html#creds) to manage credentials.", + "NoEcho": true + }, + "OAuthScope": { + "Type": "String", + "Description": "OAuth 2.0 scope (e.g., 'read:messages write:messages')", + "Default": "" + }, + "SaslOauthbearerExtensions": { + "Type": "String", + "Description": "SASL OAUTHBEARER extensions (e.g., 'logicalCluster=lkc-kmom,identityPoolId=pool-lAr')", + "Default": "" + }, + "SecurityProtocol": { + "Type": "String", + "Description": "Describes the transport type. For OAuth, typically SASL_SSL.", + "Default": "SASL_SSL", + "AllowedValues": [ + "PLAINTEXT", + "SSL", + "SASL_PLAINTEXT", + "SASL_SSL" + ] + }, + "BrokerPublicCertificate": { + "Type": "String", + "Description": "A trusted, public x509 certificate for connecting to Kafka over SSL. Required if SecurityProtocol is SSL or SASL_SSL.", + "Default": "" + }, + "ConfigAutoOffsetReset": { + "Type": "String", + "Description": "Kafka consumer configuration for auto.offset.reset", + "Default": "earliest", + "AllowedValues": [ + "earliest", + "latest", + "none" + ] + } + }, + "Mappings": {}, + "Resources": { + "StreamConnection": { + "Type": "MongoDB::Atlas::StreamConnection", + "Properties": { + "ProjectId": { + "Ref": "ProjectId" + }, + "Profile": { + "Ref": "Profile" + }, + "ConnectionName": { + "Ref": "ConnectionName" + }, + "WorkspaceName": { + "Ref": "WorkspaceName" + }, + "Type": "Kafka", + "BootstrapServers": { + "Ref": "BootstrapServers" + }, + "Authentication": { + "Mechanism": "OAUTHBEARER", + "Method": "OIDC", + "TokenEndpointUrl": { + "Ref": "OAuthTokenEndpointUrl" + }, + "ClientId": { + "Ref": "OAuthClientId" + }, + "ClientSecret": { + "Ref": "OAuthClientSecret" + }, + "Scope": { + "Ref": "OAuthScope" + }, + "SaslOauthbearerExtensions": { + "Ref": "SaslOauthbearerExtensions" + } + }, + "Security": { + "Protocol": { + "Ref": "SecurityProtocol" + }, + 
"BrokerPublicCertificate": { + "Ref": "BrokerPublicCertificate" + } + }, + "Config": { + "auto.offset.reset": { + "Ref": "ConfigAutoOffsetReset" + } + }, + "Networking": { + "Access": { + "Type": "PUBLIC" + } + } + } + } + }, + "Outputs": { + "ConnectionName": { + "Description": "Name of the created stream connection", + "Value": { + "Ref": "ConnectionName" + } + }, + "ConnectionType": { + "Description": "Type of the stream connection", + "Value": "Kafka" + }, + "AuthenticationMechanism": { + "Description": "Kafka authentication mechanism", + "Value": "OAUTHBEARER" + }, + "BootstrapServers": { + "Description": "Kafka bootstrap servers", + "Value": { + "Ref": "BootstrapServers" + } + } + } +} + diff --git a/examples/atlas-streams/stream-connection/kafka-stream-connection.json b/examples/atlas-streams/stream-connection/kafka-stream-connection.json index 4b8fe0b8c..2040e2f1d 100644 --- a/examples/atlas-streams/stream-connection/kafka-stream-connection.json +++ b/examples/atlas-streams/stream-connection/kafka-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - "InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -59,8 +59,8 @@ "ConnectionName": { "Ref": "ConnectionName" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "Type": "Kafka", "Authentication": { diff --git a/examples/atlas-streams/stream-connection/sample-stream-connection.json b/examples/atlas-streams/stream-connection/sample-stream-connection.json index aef9fcc4e..d03486c8c 100644 --- a/examples/atlas-streams/stream-connection/sample-stream-connection.json +++ b/examples/atlas-streams/stream-connection/sample-stream-connection.json @@ -11,9 +11,9 @@ "Type": "String", "Description": "Atlas Project Id" }, - 
"InstanceName": { + "WorkspaceName": { "Type": "String", - "Description": "Human-readable label that identifies the stream instance" + "Description": "Human-readable label that identifies the stream workspace" }, "ConnectionName": { "Type": "String", @@ -32,8 +32,8 @@ "Profile": { "Ref": "Profile" }, - "InstanceName": { - "Ref": "InstanceName" + "WorkspaceName": { + "Ref": "WorkspaceName" }, "Type": "Sample", "ConnectionName": {