Skip to content

Commit 344a742

Browse files
giulio-leone and Copilot committed
fix: address Gemini review feedback
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
1 parent 171f091 commit 344a742

2 files changed

Lines changed: 14 additions & 5 deletions

File tree

src/google/adk/flows/llm_flows/base_llm_flow.py

Lines changed: 10 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -702,6 +702,9 @@ def get_author_for_event(llm_response):
702702
else:
703703
return invocation_context.agent.name
704704

705+
# Cache maps function call names to generated IDs so that partial and
706+
# final streaming events for the same call share a stable ID.
707+
function_call_id_cache: dict[str, str] = {}
705708
try:
706709
while True:
707710
async with Aclosing(llm_connection.receive()) as agen:
@@ -726,6 +729,7 @@ def get_author_for_event(llm_response):
726729
llm_request,
727730
llm_response,
728731
model_response_event,
732+
function_call_id_cache,
729733
)
730734
) as agen:
731735
async for event in agen:
@@ -959,6 +963,7 @@ async def _postprocess_live(
959963
llm_request: LlmRequest,
960964
llm_response: LlmResponse,
961965
model_response_event: Event,
966+
function_call_id_cache: Optional[dict[str, str]] = None,
962967
) -> AsyncGenerator[Event, None]:
963968
"""Postprocess after calling the LLM asynchronously.
964969
@@ -967,6 +972,9 @@ async def _postprocess_live(
967972
llm_request: The original LLM request.
968973
llm_response: The LLM response from the LLM call.
969974
model_response_event: A mutable event for the LLM response.
975+
function_call_id_cache: Optional dict mapping function call names to
976+
previously generated IDs. Keeps IDs stable across partial and final
977+
streaming events.
970978
971979
Yields:
972980
A generator of events.
@@ -1028,7 +1036,8 @@ async def _postprocess_live(
10281036

10291037
# Builds the event.
10301038
model_response_event = self._finalize_model_response_event(
1031-
llm_request, llm_response, model_response_event
1039+
llm_request, llm_response, model_response_event,
1040+
function_call_id_cache,
10321041
)
10331042
yield model_response_event
10341043

src/google/adk/flows/llm_flows/functions.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -194,12 +194,12 @@ def populate_client_function_call_id(
194194
# Use (name, index) as cache key so that two calls to the same
195195
# function in a single response keep separate stable IDs.
196196
cache_key = f'{function_call.name}:{idx}'
197-
if function_call_id_cache is not None and cache_key in function_call_id_cache:
198-
function_call.id = function_call_id_cache[cache_key]
197+
if function_call_id_cache is not None:
198+
function_call.id = function_call_id_cache.setdefault(
199+
cache_key, generate_client_function_call_id()
200+
)
199201
else:
200202
function_call.id = generate_client_function_call_id()
201-
if function_call_id_cache is not None:
202-
function_call_id_cache[cache_key] = function_call.id
203203

204204

205205
def remove_client_function_call_id(content: Optional[types.Content]) -> None:

0 commit comments

Comments (0)