7 changes: 7 additions & 0 deletions veadk/knowledgebase/knowledgebase.py
@@ -108,6 +108,8 @@ class KnowledgeBase(BaseModel):

enable_profile: bool = False

query_with_user_profile: bool = False

def model_post_init(self, __context: Any) -> None:
if isinstance(self.backend, BaseKnowledgebaseBackend):
self._backend = self.backend
@@ -134,6 +136,11 @@ def model_post_init(self, __context: Any) -> None:
f"Initialized knowledgebase with backend {self._backend.__class__.__name__}"
)

if self.query_with_user_profile:
logger.info(
"Enable user profile querying for knowledgebase. You *must* use Viking Memory backend to enjoy this feature."
)

def add_from_directory(self, directory: str, **kwargs) -> bool:
"""Add knowledge from file path to knowledgebase.

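Taken together, this file-level change adds an opt-in `query_with_user_profile` flag to `KnowledgeBase`. A minimal sketch of enabling it is shown below; the backend value is an assumption for illustration and is not part of this diff, and the user-profile lookup itself depends on the agent's long-term memory using the Viking backend (see the changes further down).

```python
# Minimal sketch (not from this diff): opting in to profile-aware querying.
# The backend value is assumed for illustration only.
from veadk.knowledgebase.knowledgebase import KnowledgeBase

kb = KnowledgeBase(
    backend="viking",              # assumed backend name
    query_with_user_profile=True,  # new field introduced by this change
)
```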
86 changes: 10 additions & 76 deletions veadk/memory/long_term_memory.py
@@ -100,68 +100,6 @@ class LongTermMemory(BaseMemoryService, BaseModel):

Notes:
Please ensure that you have set the embedding-related configurations in environment variables.

Examples:
### Simple long-term memory

Once you create a long-term memory without any arguments, all configurations come from **environment variables**.

```python
import asyncio

from veadk import Agent, Runner
from veadk.memory.long_term_memory import LongTermMemory
from veadk.memory.short_term_memory import ShortTermMemory

app_name = "veadk_playground_app"
user_id = "veadk_playground_user"

long_term_memory = LongTermMemory(backend="local", app_name=app_name)

agent = Agent(long_term_memory=long_term_memory)

runner = Runner(
agent=agent,
app_name=app_name,
user_id=user_id,
short_term_memory=ShortTermMemory(),
)

# ===== add memory =====
session_id = "veadk_playground_session"
teaching_prompt = "I brought an ice-cream last week."

asyncio.run(runner.run(messages=teaching_prompt, session_id=session_id))
asyncio.run(
runner.save_session_to_long_term_memory(session_id=session_id)
) # save session to long-term memory


# ===== check memory =====
session_id = "veadk_playground_session_2" # use a new session
student_prompt = "What did I bring last week?"

response = asyncio.run(runner.run(messages=student_prompt, session_id=session_id))

print(response)
```

### Create with a backend instance

```python
from veadk.memory.long_term_memory import LongTermMemory
from veadk.memory.long_term_memory.backends import LongTermMemory

long_term_memory = LongTermMemory(backend=...)
```

### Create with backend configurations

```python
from veadk.memory.long_term_memory import LongTermMemory

long_term_memory = LongTermMemory(backend="", backend_config={})
```
"""

backend: Union[
@@ -313,20 +251,6 @@ async def search_memory(
SearchMemoryResponse:
An object containing a list of `MemoryEntry` items representing
the retrieved memory snippets relevant to the query.

Examples:
```python
response = await memory_service.search_memory(
app_name="chat_app",
user_id="user_123",
query="favorite programming language"
)

for memory in response.memories:
print(memory.content.parts[0].text)
# Output:
# User likes Python and TypeScript for backend development.
```
"""
logger.info(f"Search memory with query={query}")

@@ -369,3 +293,13 @@ async def search_memory(
f"Return {len(memory_events)} memory events for query: {query} index={self.index} user_id={user_id}"
)
return SearchMemoryResponse(memories=memory_events)

def get_user_profile(self, user_id: str) -> str:
logger.info(f"Get user profile for user_id={user_id}")
if self.backend == "viking":
return self._backend.get_user_profile(user_id=user_id) # type: ignore
else:
logger.error(
f"Long term memory backend {self.backend} does not support get user profile. Return empty string."
)
return ""
55 changes: 55 additions & 0 deletions veadk/memory/long_term_memory_backends/vikingdb_memory_backend.py
@@ -224,3 +224,58 @@ def search_memory(
for r in result
]
return []

def get_user_profile(self, user_id: str) -> str:
from veadk.utils.volcengine_sign import ve_request

response: dict = self._get_client().get_collection(
collection_name=self.index, project=self.volcengine_project
)

mem_id = response["Result"]["ResourceId"]
logger.info(
f"Get user profile for user_id={user_id} from Viking Memory with mem_id={mem_id}"
)

ak = ""
sk = ""
sts_token = ""
if self.volcengine_access_key and self.volcengine_secret_key:
ak = self.volcengine_access_key
sk = self.volcengine_secret_key
sts_token = self.session_token
else:
cred = get_credential_from_vefaas_iam()
ak = cred.access_key_id
sk = cred.secret_access_key
sts_token = cred.session_token

response = ve_request(
request_body={
"filter": {
"user_id": [user_id],
"memory_category": 1,
},
"limit": 5000,
"resource_id": mem_id,
},
action="MemorySearch",
ak=ak,
sk=sk,
header={"X-Security-Token": sts_token},
service="vikingdb",
version="2025-06-09",
region=self.region,
host="open.volcengineapi.com",
)

try:
logger.debug(
f"Response from VikingDB: {response}, user_profile: {response['data']['result_list'][0]['memory_info']['user_profile']}"
)
return response["data"]["result_list"][0]["memory_info"]["user_profile"]
except (KeyError, IndexError):
logger.error(
f"Failed to get user profile for user_id={user_id} mem_id={mem_id}: {response}"
)
return ""
25 changes: 25 additions & 0 deletions veadk/tools/builtin_tools/load_knowledgebase.py
@@ -124,6 +124,31 @@ async def process_llm_request(
]
)

if self.knowledgebase.query_with_user_profile:
from veadk import Agent

agent = tool_context._invocation_context.agent
if not isinstance(agent, Agent) or not agent.long_term_memory:
logger.error(
"Agent in tool context is not an instance of veadk.Agent or long term memory is not set in agent attribution. Cannot load user profile."
)
return

user_profile = agent.long_term_memory.get_user_profile(tool_context.user_id)

if user_profile:
llm_request.append_instructions(
[
f"""
Please also generate the knowledgebase queries based on the user profile (description). For example, for the query `quick sort algorithm`, generate `quick sort algorithm for python` if the user is a Python developer, or `quick sort algorithm friendly introduction` if the user is a beginner.

The user profile is:

{user_profile}
"""
]
)

async def load_knowledgebase(
self, query: str, tool_context: ToolContext
) -> LoadKnowledgebaseResponse:
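End-to-end, the feature wires the knowledgebase tool to the agent's long-term memory. Below is a sketch of the full setup, assuming `Agent` accepts a `knowledgebase` argument alongside `long_term_memory` (the `Agent` constructor is not part of this diff); names are illustrative.

```python
# End-to-end sketch; the `knowledgebase=` parameter on Agent is an assumption,
# while `long_term_memory=` matches existing usage elsewhere in the repository.
from veadk import Agent
from veadk.knowledgebase.knowledgebase import KnowledgeBase
from veadk.memory.long_term_memory import LongTermMemory

agent = Agent(
    knowledgebase=KnowledgeBase(query_with_user_profile=True),   # assumed parameter
    long_term_memory=LongTermMemory(backend="viking", app_name="demo_app"),
)
# During LLM request preprocessing, the load_knowledgebase tool calls
# agent.long_term_memory.get_user_profile(user_id) and appends the profile
# to the query-generation instructions.
```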