import os
from openai import OpenAI
from jeanmemory import JeanMemoryClient
# 1. Initialize the clients
# Both API keys are read from the environment; .get() returns None when unset,
# which lets the client libraries surface their own auth errors.
jean = JeanMemoryClient(api_key=os.environ.get("JEAN_API_KEY"))
openai = OpenAI(api_key=os.environ.get("OPENAI_API_KEY"))


def get_user_token_from_request():
    """Return the Jean Memory user token, or None for the automatic test user.

    Production: replace this with your real request handling — the token comes
    from the OAuth flow via @jeanmemory/react on the frontend.
    Development: leave JEAN_USER_TOKEN unset to fall back to None, which
    selects the automatic test user.
    """
    # Placeholder implementation so this example runs as-is; previously the
    # script referenced an undefined function and raised NameError.
    return os.environ.get("JEAN_USER_TOKEN")  # None -> automatic test user


# 2. Get the user token from your frontend (or use auto test user)
# Production: Token from OAuth flow via @jeanmemory/react
# Development: Leave empty for automatic test user
user_token = get_user_token_from_request()  # Or None for test user

# 3. Get context from Jean Memory
user_message = "What were the key takeaways from our last meeting about Project Phoenix?"
context_response = jean.get_context(
    user_token=user_token,
    message=user_message,
    # All defaults: tool="jean_memory", speed="balanced", format="enhanced"
)

# 4. Engineer your final prompt
final_prompt = f"""
Using the following context, please answer the user's question.
The context is a summary of the user's memories related to their question.
Context:
---
{context_response.text}
---
User Question: {user_message}
"""

# 5. Call your LLM
completion = openai.chat.completions.create(
    model="gpt-4-turbo",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": final_prompt},
    ],
)
print(completion.choices[0].message.content)