8 changes: 4 additions & 4 deletions packages/ai-providers/server-ai-langchain/README.md
@@ -37,12 +37,12 @@ pip install langchain-google-genai
```python
import asyncio
from ldclient import LDClient, Config, Context
-from ldai import init
+from ldai import LDAIClient
from ldai.models import AICompletionConfigDefault, ModelConfig, ProviderConfig

# Initialize LaunchDarkly client
ld_client = LDClient(Config("your-sdk-key"))
-ai_client = init(ld_client)
+ai_client = LDAIClient(ld_client)

context = Context.builder("user-123").build()

@@ -84,11 +84,11 @@ if model:
### Using the runner directly

If you need to construct a runner manually (e.g. for testing), you can use
-`LangChainRunnerFactory` from the `ldai_langchain` package:
+`LangChainModelRunner` from the `ldai_langchain` package:

```python
from langchain_openai import ChatOpenAI
-from ldai_langchain import LangChainRunnerFactory
+from ldai_langchain import LangChainModelRunner

llm = ChatOpenAI(model="gpt-4", temperature=0.7)
runner = LangChainModelRunner(llm)
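Taken together, the client-initialization change in this file (and repeated in the OpenAI provider README below) is a one-line migration. A minimal before/after sketch, using only identifiers that appear in this diff:

```python
from ldclient import LDClient, Config
from ldai import LDAIClient  # replaces the former top-level `init` helper

ld_client = LDClient(Config("your-sdk-key"))

# Before this change: ai_client = init(ld_client)
ai_client = LDAIClient(ld_client)
```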
4 changes: 2 additions & 2 deletions packages/ai-providers/server-ai-openai/README.md
@@ -24,12 +24,12 @@ pip install launchdarkly-server-sdk-ai-openai
```python
import asyncio
from ldclient import LDClient, Config, Context
-from ldai import init
+from ldai import LDAIClient
from ldai.models import AICompletionConfigDefault, ModelConfig, ProviderConfig

# Initialize LaunchDarkly client
ld_client = LDClient(Config("your-sdk-key"))
-ai_client = init(ld_client)
+ai_client = LDAIClient(ld_client)

context = Context.builder("user-123").build()

26 changes: 11 additions & 15 deletions packages/sdk/server-ai/README.md
@@ -123,16 +123,12 @@ async def main():
    )

    if model:
-        # Simple conversation flow - metrics are automatically tracked by invoke()
-        response1 = await model.invoke('I need help with my order')
-        print(response1.message.content)
+        # Simple conversation flow - metrics are automatically tracked by run()
+        response1 = await model.run('I need help with my order')
+        print(response1.content)

-        response2 = await model.invoke("What's the status?")
-        print(response2.message.content)
-
-        # Access conversation history
-        messages = model.get_messages()
-        print(f'Conversation has {len(messages)} messages')
+        response2 = await model.run("What's the status?")
+        print(response2.content)

asyncio.run(main())
```
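For readers skimming the interleaved diff, the after-state of the conversation block condenses to the sketch below: `run()` replaces `invoke()`, responses expose `content` directly (previously `message.content`), and the `get_messages()` history accessor is removed. Nothing beyond the added lines above is assumed:

```python
if model:
    # Metrics are tracked automatically by run()
    response1 = await model.run('I need help with my order')
    print(response1.content)

    response2 = await model.run("What's the status?")
    print(response2.content)
```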
@@ -146,21 +142,20 @@ For more control, you can use the configuration directly with AI providers. We r
```python
import asyncio
from ldai import LDAIClient, AICompletionConfigDefault, ModelConfig
-from ldai.providers.types import LDAIMetrics, TokenUsage

-from ldai_langchain import LangChainProvider
+from ldai_langchain import create_langchain_model, get_ai_metrics_from_response

async def main():
    ai_config = ai_client.completion_config(ai_config_key, context, default)

    # Create LangChain model from configuration
-    llm = await LangChainProvider.create_langchain_model(ai_config)
+    llm = create_langchain_model(ai_config)

    # Use with tracking (sync invoke). Mint a tracker once per AI run.
    tracker = ai_config.create_tracker()
    response = tracker.track_metrics_of(
+        get_ai_metrics_from_response,
        lambda: llm.invoke(messages),
-        lambda result: LangChainProvider.get_ai_metrics_from_response(result)
    )
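    # Note on the reordering above (inferred from this diff, not from docs):
    # track_metrics_of now takes the metrics extractor first, followed by the
    # zero-argument callable that performs the actual provider call.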

    print('AI Response:', response.content)
@@ -173,7 +168,8 @@ asyncio.run(main())
```python
import asyncio
from ldai import LDAIClient, AICompletionConfigDefault, ModelConfig
-from ldai.providers.types import LDAIMetrics, TokenUsage
+from ldai.providers import LDAIMetrics
+from ldai.tracker import TokenUsage

async def main():
    ai_config = ai_client.completion_config(ai_config_key, context, default)
@@ -200,8 +196,8 @@ async def main():
    # Mint a tracker once per AI run.
    tracker = ai_config.create_tracker()
    result = await tracker.track_metrics_of_async(
+        map_custom_provider_metrics,
        call_custom_provider,
-        map_custom_provider_metrics
    )

    print('AI Response:', result.content)
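The collapsed region above elides the definitions of `call_custom_provider` and `map_custom_provider_metrics`. For orientation only, here is a hypothetical sketch of what such helpers could look like given the imports this diff introduces (`LDAIMetrics`, `TokenUsage`); the constructor fields are assumptions, not part of this PR:

```python
# Hypothetical helpers for the custom-provider example; names and fields are
# illustrative assumptions, not taken from this PR's diff.
async def call_custom_provider():
    # Invoke whatever provider SDK you use and return its raw result.
    ...

def map_custom_provider_metrics(result) -> LDAIMetrics:
    # Translate the provider's raw result into LaunchDarkly AI metrics.
    return LDAIMetrics(
        success=True,  # assumed field
        usage=TokenUsage(total=0, input=0, output=0),  # assumed fields
    )
```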