Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 29 additions & 0 deletions veadk/agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,8 @@
import os
from typing import Dict, Literal, Optional, Union

from google.adk.flows.llm_flows.base_llm_flow import BaseLlmFlow

# If user didn't set LITELLM_LOCAL_MODEL_COST_MAP, set it to True
# to enable local model cost map.
# This value is `false` by default, which brings heavy performance burden,
Expand Down Expand Up @@ -150,6 +152,8 @@ class Agent(LlmAgent):

example_store: Optional[BaseExampleProvider] = None

enable_supervisor: bool = False

def model_post_init(self, __context: Any) -> None:
super().model_post_init(None) # for sub_agents init

Expand Down Expand Up @@ -402,6 +406,31 @@ def _prepare_tracers(self):
f"Opentelemetry Tracer init {len(self.tracers[0].exporters)} exporters" # type: ignore
)

@property
def _llm_flow(self) -> BaseLlmFlow:
    """Select the LLM flow used to drive this agent.

    An agent that can neither transfer to its parent/peers nor delegate to
    sub-agents runs a single-turn flow; otherwise it needs the auto flow.
    When ``enable_supervisor`` is set, the chosen flow is replaced by its
    supervisor-augmented counterpart, which reviews each request before the
    model is called.

    Returns:
        The flow instance matching this agent's transfer configuration and
        supervisor setting.
    """
    from google.adk.flows.llm_flows.auto_flow import AutoFlow
    from google.adk.flows.llm_flows.single_flow import SingleFlow

    is_single = (
        self.disallow_transfer_to_parent
        and self.disallow_transfer_to_peers
        and not self.sub_agents
    )

    if not self.enable_supervisor:
        # Plain ADK flows; veadk.flows is not imported at all in this case.
        return SingleFlow() if is_single else AutoFlow()

    logger.debug(f"Enable supervisor flow for agent: {self.name}")
    if is_single:
        # Imported lazily (and only when needed) to avoid a circular import
        # between veadk.agent and veadk.flows.
        from veadk.flows.supervise_single_flow import SupervisorSingleFlow

        return SupervisorSingleFlow(supervised_agent=self)

    from veadk.flows.supervise_auto_flow import SupervisorAutoFlow

    return SupervisorAutoFlow(supervised_agent=self)

async def run(self, **kwargs):
raise NotImplementedError(
"Run method in VeADK agent is deprecated since version 0.5.6. Please use runner.run_async instead. Ref: https://agentkit.gitbook.io/docs/runner/overview"
Expand Down
70 changes: 70 additions & 0 deletions veadk/agents/supervise_agent.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from google.adk.models.llm_request import LlmRequest
from jinja2 import Template

from veadk import Agent, Runner
from veadk.utils.logger import get_logger

logger = get_logger(__name__)

# Jinja2 template for the supervisor agent's system prompt. It is rendered
# with ``system_prompt`` set to the supervised (worker) agent's own
# instruction, and asks the supervisor to reply with a JSON object holding
# "advice" and "reason" keys — an empty "advice" means the execution history
# looks correct. Consumers (e.g. SupervisorAutoFlow) parse this JSON.
instruction = Template("""You are a supervisor of an agent system. The system prompt of worker agent is:

```system prompt
{{ system_prompt }}
```



You should guide the agent to finish task and must output a JSON-format string with specific advice and reason:

- If you think the history execution is not correct, you should give your advice to the worker agent: {"advice": "Your advice here", "reason": "Your reason here"}.
- If you think the history execution is correct, you should output an empty string: {"advice": "", "reason": "Your reason here"}.
""")


def build_supervisor(supervised_agent: Agent) -> Agent:
    """Create a supervisor agent for the given worker agent.

    The supervisor's instruction embeds the worker agent's own system
    prompt (via the module-level ``instruction`` template) so it can judge
    whether the worker's execution history is on track.

    Args:
        supervised_agent: The worker agent to be supervised.

    Returns:
        A fresh ``Agent`` configured to act as the supervisor.
    """
    rendered_instruction = instruction.render(
        system_prompt=supervised_agent.instruction
    )
    return Agent(
        name="supervisor",
        description="A supervisor for agent execution",
        instruction=rendered_instruction,
    )


async def generate_advice(agent: Agent, llm_request: LlmRequest) -> str:
    """Run the supervisor agent over the request history and return its advice.

    Flattens the pending LLM request's contents (text, function calls and
    function responses) into a readable trajectory, prefixes it with the
    worker's tool inventory, and asks the supervisor agent for a JSON-format
    advice string.

    Args:
        agent: The supervisor agent (see ``build_supervisor``).
        llm_request: The worker's pending request, whose ``contents`` hold
            the execution history to review.

    Returns:
        The supervisor's raw response text (expected to be JSON with
        "advice" and "reason" keys, but not guaranteed — callers must parse
        defensively).
    """
    runner = Runner(agent=agent)

    # One entry per part, joined with newlines so consecutive turns do not
    # run together in the supervisor prompt (plain += produced e.g.
    # "user: hiassistant: hello").
    lines: list[str] = []
    for content in llm_request.contents:
        if not content or not content.parts:
            continue
        for part in content.parts:
            if part.text:
                lines.append(f"{content.role}: {part.text}")
            if part.function_call:
                lines.append(f"{content.role}: {part.function_call}")
            if part.function_response:
                lines.append(f"{content.role}: {part.function_response}")
    messages = "\n".join(lines)

    prompt = (
        f"Agent has the following tools: {llm_request.tools_dict}. History trajectory is: "
        + messages
    )

    logger.debug(f"Prompt for supervisor: {prompt}")

    return await runner.run(messages=prompt)
13 changes: 13 additions & 0 deletions veadk/flows/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
70 changes: 70 additions & 0 deletions veadk/flows/supervise_auto_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,70 @@
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
from typing import AsyncGenerator

from google.adk.agents.invocation_context import InvocationContext
from google.adk.events import Event
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from google.genai.types import Content, Part

from veadk import Agent
from veadk.agents.supervise_agent import generate_advice
from veadk.flows.supervise_single_flow import SupervisorSingleFlow
from veadk.utils.logger import get_logger

logger = get_logger(__name__)


class SupervisorAutoFlow(SupervisorSingleFlow):
    """Auto-flow variant that injects supervisor advice before each LLM call.

    Construction is inherited from ``SupervisorSingleFlow`` (which builds
    ``self._supervisor`` from the supervised agent).
    """

    async def _call_llm_async(
        self,
        invocation_context: InvocationContext,
        llm_request: LlmRequest,
        model_response_event: Event,
    ) -> AsyncGenerator[LlmResponse, None]:
        """Ask the supervisor for advice, append it to the request, then
        delegate to the base flow's LLM call.

        Yields:
            LLM responses from the underlying flow, unchanged.
        """
        supervisor_response = await generate_advice(self._supervisor, llm_request)
        logger.debug(f"Advice from supervisor: {supervisor_response}")

        # The supervisor is prompted to return JSON, but LLM output is not
        # guaranteed to parse; a malformed response must never break the
        # worker's LLM call, so fall back to "no advice" instead of raising.
        try:
            advice_and_reason = json.loads(supervisor_response)
        except (json.JSONDecodeError, TypeError):
            logger.warning(
                f"Supervisor response is not valid JSON, skip advice: {supervisor_response}"
            )
            advice_and_reason = {}
        if not isinstance(advice_and_reason, dict):
            advice_and_reason = {}

        advice = advice_and_reason.get("advice", "")
        reason = advice_and_reason.get("reason", "")

        if advice:
            logger.debug("Add supervisor advice to llm request.")
            llm_request.contents.append(
                Content(
                    parts=[
                        Part(
                            text=f"""Message from your supervisor (not user): {advice}, the corresponding reason is {reason}

Please follow the advice and reason above to optimize your actions.
"""
                        )
                    ],
                    role="user",
                )
            )
        else:
            logger.info(
                f"Supervisor advice is empty, reason: {reason}. Skip adding to llm request."
            )

        async for llm_response in super()._call_llm_async(
            invocation_context, llm_request, model_response_event
        ):
            yield llm_response
44 changes: 44 additions & 0 deletions veadk/flows/supervise_single_flow.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,44 @@
# Copyright (c) 2025 Beijing Volcano Engine Technology Co., Ltd. and/or its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import AsyncGenerator

from google.adk.agents.invocation_context import InvocationContext
from google.adk.events import Event
from google.adk.flows.llm_flows.single_flow import SingleFlow
from google.adk.models.llm_request import LlmRequest
from google.adk.models.llm_response import LlmResponse
from typing_extensions import override

from veadk import Agent
from veadk.agents.supervise_agent import build_supervisor


class SupervisorSingleFlow(SingleFlow):
    """Single flow that carries a supervisor agent for the supervised agent.

    The base ``SingleFlow`` behavior is unchanged here; subclasses use
    ``self._supervisor`` (built from the supervised agent) to review the
    request before the model is called.
    """

    def __init__(self, supervised_agent: Agent):
        # Build the supervisor before delegating to the base initializer so
        # it is available as soon as the flow object exists.
        self._supervisor = build_supervisor(supervised_agent)

        super().__init__()

    @override
    async def _call_llm_async(
        self,
        invocation_context: InvocationContext,
        llm_request: LlmRequest,
        model_response_event: Event,
    ) -> AsyncGenerator[LlmResponse, None]:
        # Pass-through override: delegate to SingleFlow unchanged and relay
        # every response as it arrives.
        responses = super()._call_llm_async(
            invocation_context, llm_request, model_response_event
        )
        async for response in responses:
            yield response