Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,7 @@
from semantic_kernel.contents.annotation_content import AnnotationContent
from semantic_kernel.contents.file_reference_content import FileReferenceContent
from semantic_kernel.contents.function_call_content import FunctionCallContent
from semantic_kernel.contents.text_content import TextContent
from semantic_kernel.exceptions.service_exceptions import ServiceInvalidExecutionSettingsError
from semantic_kernel.services.ai_service_client_base import AIServiceClientBase
from semantic_kernel.utils.telemetry.model_diagnostics.gen_ai_attributes import AVAILABLE_FUNCTIONS
Expand Down Expand Up @@ -135,16 +136,18 @@ async def get_chat_message_contents(

# Auto invoke loop
with use_span(self._start_auto_function_invocation_activity(kernel, settings), end_on_exit=True) as _:
function_call_messages: list["ChatMessageContent"] = []
for request_index in range(settings.function_choice_behavior.maximum_auto_invoke_attempts):
completions = await self._inner_get_chat_message_contents(chat_history, settings)
# Get the function call contents from the chat message. There is only one chat message,
# which should be checked in the `_verify_function_choice_settings` method.
function_calls = [item for item in completions[0].items if isinstance(item, FunctionCallContent)]
if (fc_count := len(function_calls)) == 0:
Comment thread
pragnyanramtha marked this conversation as resolved.
return completions
return self._combine_auto_invoke_text_responses(completions, function_call_messages)

# Since we have a function call, add the assistant's tool call message to the history
chat_history.add_message(message=completions[0])
function_call_messages.append(completions[0])

logger.info(f"processing {fc_count} tool calls in parallel.")

Expand All @@ -167,11 +170,15 @@ async def get_chat_message_contents(
)

if any(result.terminate for result in results if result is not None):
return merge_function_results(chat_history.messages[-len(results) :])
return self._combine_auto_invoke_text_responses(
merge_function_results(chat_history.messages[-len(results) :]),
function_call_messages,
)
else:
# Do a final call, without function calling when the max has been reached.
self._reset_function_choice_settings(settings)
return await self._inner_get_chat_message_contents(chat_history, settings)
completions = await self._inner_get_chat_message_contents(chat_history, settings)
return self._combine_auto_invoke_text_responses(completions, function_call_messages)

async def get_chat_message_content(
self, chat_history: "ChatHistory", settings: "PromptExecutionSettings", **kwargs: Any
Expand Down Expand Up @@ -377,6 +384,43 @@ def _prepare_chat_history_for_request(
if not isinstance(message, (AnnotationContent, FileReferenceContent))
]

def _combine_auto_invoke_text_responses(
    self,
    completions: list["ChatMessageContent"],
    function_call_messages: list["ChatMessageContent"],
) -> list["ChatMessageContent"]:
    """Prepend text from intermediate function-call responses to the final response.

    Args:
        completions: The final chat message contents returned by the model.
        function_call_messages: Assistant messages produced during the auto-invoke
            loop, some of which may carry text alongside their function calls.

    Returns:
        The original ``completions`` untouched when no intermediate text exists,
        otherwise a deep copy with the accumulated text prepended to each message.
    """
    intermediate_texts = []
    for message in function_call_messages:
        for item in message.items:
            if isinstance(item, TextContent) and item.text:
                intermediate_texts.append(item.text)
    if not intermediate_texts:
        # Nothing to prepend — hand the final response back untouched.
        return completions

    prefix = "\n\n".join(intermediate_texts)
    # Deep-copy so callers holding references to the originals are unaffected.
    merged = copy.deepcopy(merged_source := completions) if (merged_source := completions) else []
    merged = copy.deepcopy(completions)
    for message in merged:
        existing = next((part for part in message.items if isinstance(part, TextContent)), None)
        if existing is not None:
            # Join prefix and existing text, dropping any empty piece.
            existing.text = "\n\n".join(piece for piece in (prefix, existing.text) if piece)
            existing.encoding = message.encoding
        else:
            # The final response carried no text item; synthesize one for the prefix.
            message.items.insert(
                0,
                TextContent(
                    ai_model_id=message.ai_model_id,
                    inner_content=message.inner_content,
                    metadata=message.metadata,
                    text=prefix,
                    encoding=message.encoding,
                ),
            )

    return merged

def _verify_function_choice_settings(self, settings: "PromptExecutionSettings") -> None:
"""Additional verification to validate settings for function choice behavior.

Expand Down
191 changes: 191 additions & 0 deletions python/tests/unit/connectors/ai/test_chat_completion_client_base.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,191 @@
# Copyright (c) Microsoft. All rights reserved.

from semantic_kernel.connectors.ai.chat_completion_client_base import ChatCompletionClientBase
from semantic_kernel.connectors.ai.function_choice_behavior import FunctionChoiceBehavior
from semantic_kernel.connectors.ai.prompt_execution_settings import PromptExecutionSettings
from semantic_kernel.contents import (
ChatHistory,
ChatMessageContent,
FunctionCallContent,
FunctionResultContent,
TextContent,
)
from semantic_kernel.contents.utils.author_role import AuthorRole
from semantic_kernel.filters.filter_types import FilterTypes
from semantic_kernel.functions.kernel_arguments import KernelArguments
from semantic_kernel.functions.kernel_function_decorator import kernel_function
from semantic_kernel.kernel import Kernel


class StubChatCompletion(ChatCompletionClientBase):
    """Chat-completion test double that replays a scripted queue of responses."""

    SUPPORTS_FUNCTION_CALLING = True
    # Each inner list is one full model response; consumed front-to-back, one per call.
    responses: list[list[ChatMessageContent]]

    async def _inner_get_chat_message_contents(
        self,
        chat_history: ChatHistory,
        settings: PromptExecutionSettings,
    ) -> list[ChatMessageContent]:
        """Return the next scripted response; history and settings are ignored."""
        next_response = self.responses[0]
        del self.responses[0]
        return next_response


def _tool_call_message(text: str | None, call_id: str = "call_1") -> ChatMessageContent:
    """Build an assistant message containing a tool call, optionally preceded by text.

    Args:
        text: Text to place before the tool call, or None for a call-only message.
        call_id: Identifier assigned to the function call.
    """
    contents: list = [] if text is None else [TextContent(text=text)]
    contents.append(FunctionCallContent(id=call_id, name="test-tool", arguments={}))
    return ChatMessageContent(role=AuthorRole.ASSISTANT, items=contents)


async def test_auto_function_call_preserves_intermediate_text_in_final_response() -> None:
    """Text emitted alongside a tool call must survive into the final answer."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))

    first_turn = ChatMessageContent(
        role=AuthorRole.ASSISTANT,
        items=[
            TextContent(text="I'll check that."),
            FunctionCallContent(id="call_1", name="test-tool", arguments={}),
        ],
    )
    final_turn = ChatMessageContent(role=AuthorRole.ASSISTANT, content="The answer is ready.")
    client = StubChatCompletion(ai_model_id="test-model", responses=[[first_turn], [final_turn]])

    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=PromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()),
        kernel=kernel,
        arguments=KernelArguments(),
    )

    assert result[0].content == "I'll check that.\n\nThe answer is ready."


async def test_auto_function_call_preserves_intermediate_text_after_max_attempts() -> None:
    """Intermediate text is kept even when the auto-invoke attempt cap forces a final call."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))

    scripted = [
        [_tool_call_message("I'll check that.")],
        [ChatMessageContent(role=AuthorRole.ASSISTANT, content="The answer is ready.")],
    ]
    client = StubChatCompletion(ai_model_id="test-model", responses=scripted)

    # Cap at one attempt so the second response comes from the forced final call.
    capped_settings = PromptExecutionSettings(
        function_choice_behavior=FunctionChoiceBehavior.Auto(maximum_auto_invoke_attempts=1),
    )
    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=capped_settings,
        kernel=kernel,
        arguments=KernelArguments(),
    )

    assert result[0].content == "I'll check that.\n\nThe answer is ready."


async def test_auto_function_call_preserves_text_from_multiple_intermediate_responses() -> None:
    """Text from every intermediate tool-calling turn is accumulated, in order."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))

    scripted = [
        [_tool_call_message("First tool call.", call_id="call_1")],
        [_tool_call_message("Second tool call.", call_id="call_2")],
        [ChatMessageContent(role=AuthorRole.ASSISTANT, content="The answer is ready.")],
    ]
    client = StubChatCompletion(ai_model_id="test-model", responses=scripted)

    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=PromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()),
        kernel=kernel,
        arguments=KernelArguments(),
    )

    assert result[0].content == "First tool call.\n\nSecond tool call.\n\nThe answer is ready."


async def test_auto_function_call_inserts_intermediate_text_when_final_response_has_no_text() -> None:
    """When the final response is empty, the intermediate text becomes the answer."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))

    empty_final = ChatMessageContent(role=AuthorRole.ASSISTANT, items=[])
    client = StubChatCompletion(
        ai_model_id="test-model",
        responses=[[_tool_call_message("I'll check that.")], [empty_final]],
    )

    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=PromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()),
        kernel=kernel,
        arguments=KernelArguments(),
    )

    assert result[0].content == "I'll check that."


async def test_auto_function_call_preserves_intermediate_text_when_filter_terminates() -> None:
    """A terminating auto-invocation filter still yields the intermediate text and tool result."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))

    @kernel.filter(FilterTypes.AUTO_FUNCTION_INVOCATION)
    async def terminate_after_invoke(context, next):
        # Run the invocation, then request termination of the auto-invoke loop.
        await next(context)
        context.terminate = True

    client = StubChatCompletion(
        ai_model_id="test-model",
        responses=[[_tool_call_message("I'll check that.")]],
    )

    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=PromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()),
        kernel=kernel,
        arguments=KernelArguments(),
    )

    assert result[0].content == "I'll check that."
    assert any(isinstance(item, FunctionResultContent) for item in result[0].items)


async def test_auto_function_call_preserves_final_response_when_intermediate_has_no_text() -> None:
    """A text-free tool-calling turn leaves the final response object untouched."""
    kernel = Kernel()
    kernel.add_function("test", kernel_function(lambda: "tool result", name="tool"))
    final_response = ChatMessageContent(role=AuthorRole.ASSISTANT, content="The answer is ready.")

    call_only_turn = ChatMessageContent(
        role=AuthorRole.ASSISTANT,
        items=[FunctionCallContent(id="call_1", name="test-tool", arguments={})],
    )
    client = StubChatCompletion(
        ai_model_id="test-model",
        responses=[[call_only_turn], [final_response]],
    )

    result = await client.get_chat_message_contents(
        chat_history=ChatHistory(system_message="Test"),
        settings=PromptExecutionSettings(function_choice_behavior=FunctionChoiceBehavior.Auto()),
        kernel=kernel,
        arguments=KernelArguments(),
    )

    # Same objects returned — no copy should be made when there is nothing to prepend.
    assert result == [final_response]
Comment thread
pragnyanramtha marked this conversation as resolved.
Loading