diff --git a/src/llm/apis/openai_completions.cpp b/src/llm/apis/openai_completions.cpp
index 810fcdc50a..62653258b9 100644
--- a/src/llm/apis/openai_completions.cpp
+++ b/src/llm/apis/openai_completions.cpp
@@ -409,17 +409,18 @@ std::string OpenAIChatCompletionsHandler::serializeUnaryResponse(ov::genai::Enco
 
     // choices: array of size N, where N is related to n request parameter
     jsonResponse.StartArray("choices");
-    int index = 0;
-    for (int i = 0; i < results.tokens.size(); i++) {
+    for (size_t i = 0; i < results.tokens.size(); ++i) {
         const std::vector<int64_t>& tokens = results.tokens[i];
         SPDLOG_LOGGER_TRACE(llm_calculator_logger, "Generated tokens: {}", tokens);
         ParsedOutput parsedOutput = parseOutputIfNeeded(tokens);
         jsonResponse.StartObject();
         // finish_reason: "stop" in regular scenario, "tool_calls" if output contains tool calls
-        auto finishReason = mapFinishReason(ov::genai::GenerationFinishReason::STOP, !parsedOutput.toolCalls.empty());
+        const ov::genai::GenerationFinishReason finishReasonRaw =
+            (i < results.finish_reasons.size()) ? results.finish_reasons[i] : ov::genai::GenerationFinishReason::STOP;
+        auto finishReason = mapFinishReason(finishReasonRaw, !parsedOutput.toolCalls.empty());
         jsonResponse.FinishReason(finishReason.value_or("unknown"));
         // index: integer; Choice index, only n=1 supported anyway
-        jsonResponse.Index(index++);
+        jsonResponse.Index(static_cast<int>(i));
 
         if (endpoint == Endpoint::CHAT_COMPLETIONS) {
             jsonResponse.MessageObject(parsedOutput);
@@ -462,7 +463,9 @@ std::string OpenAIChatCompletionsHandler::serializeUnaryResponse(ov::genai::VLMD
     OVMS_PROFILE_FUNCTION();
     usage.promptTokens = results.perf_metrics.get_num_input_tokens();
     usage.completionTokens = results.perf_metrics.get_num_generated_tokens();
-
+    if (results.finish_reasons.empty()) {
+        throw std::runtime_error("Missing finish reason in unary VLM responses generation result");
+    }
     OpenAiJsonResponse jsonResponse;
     jsonResponse.StartObject();
 
@@ -480,8 +483,14 @@ std::string OpenAIChatCompletionsHandler::serializeUnaryResponse(ov::genai::VLMD
     SPDLOG_LOGGER_TRACE(llm_calculator_logger, "Generated tokens: {}", generatedTokens);
     ParsedOutput parsedOutput = parseOutputIfNeeded(generatedTokens);
     jsonResponse.StartObject();
-    // finish_reason: "stop" in regular scenario, "tool_calls" if output contains tool calls
-    auto finishReason = mapFinishReason(ov::genai::GenerationFinishReason::STOP, !parsedOutput.toolCalls.empty());
+    ov::genai::GenerationFinishReason responsesFinishReason = ov::genai::GenerationFinishReason::STOP;
+    for (const auto& finishReason : results.finish_reasons) {
+        if (finishReason == ov::genai::GenerationFinishReason::LENGTH) {
+            responsesFinishReason = ov::genai::GenerationFinishReason::LENGTH;
+            break;
+        }
+    }
+    auto finishReason = mapFinishReason(responsesFinishReason, !parsedOutput.toolCalls.empty());
     jsonResponse.FinishReason(finishReason.value_or("unknown"));
     // index: integer; Choice index, only n=1 supported anyway
     jsonResponse.Index(index++);
diff --git a/src/llm/apis/openai_responses.cpp b/src/llm/apis/openai_responses.cpp
index 60ec1c4f08..76708be7e1 100644
--- a/src/llm/apis/openai_responses.cpp
+++ b/src/llm/apis/openai_responses.cpp
@@ -653,10 +653,17 @@ std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::EncodedRes
     usage.promptTokens = results.perf_metrics.get_num_input_tokens();
     usage.completionTokens = results.perf_metrics.get_num_generated_tokens();
     std::vector<ParsedOutput> parsedOutputs;
+    ov::genai::GenerationFinishReason responsesFinishReason = ov::genai::GenerationFinishReason::STOP;
     for (const auto& tokens : results.tokens) {
         parsedOutputs.push_back(parseOutputIfNeeded(tokens));
     }
-    return serializeUnaryResponseImpl(parsedOutputs);
+    for (const auto& finishReason : results.finish_reasons) {
+        if (finishReason == ov::genai::GenerationFinishReason::LENGTH) {
+            responsesFinishReason = ov::genai::GenerationFinishReason::LENGTH;
+            break;
+        }
+    }
+    return serializeUnaryResponseImpl(parsedOutputs, responsesFinishReason);
 }
 
 std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::VLMDecodedResults& results, const std::string& textResponse) {
@@ -677,7 +684,14 @@ std::string OpenAIResponsesHandler::serializeUnaryResponse(ov::genai::VLMDecoded
             parsedOutputs.push_back(std::move(output));
         }
     }
-    return serializeUnaryResponseImpl(parsedOutputs);
+    ov::genai::GenerationFinishReason responsesFinishReason = ov::genai::GenerationFinishReason::STOP;
+    for (const auto& finishReason : results.finish_reasons) {
+        if (finishReason == ov::genai::GenerationFinishReason::LENGTH) {
+            responsesFinishReason = ov::genai::GenerationFinishReason::LENGTH;
+            break;
+        }
+    }
+    return serializeUnaryResponseImpl(parsedOutputs, responsesFinishReason);
 }
 
 // --- Streaming event building blocks ---
diff --git a/src/llm/language_model/legacy/servable.cpp b/src/llm/language_model/legacy/servable.cpp
index 4234088a2a..3ae0055530 100644
--- a/src/llm/language_model/legacy/servable.cpp
+++ b/src/llm/language_model/legacy/servable.cpp
@@ -229,7 +229,11 @@ absl::Status LegacyServable::preparePartialResponse(std::shared_ptr
     if (!executionContext->lastStreamerCallbackOutput.empty()) {
         lastTextChunk = lastTextChunk + executionContext->lastStreamerCallbackOutput;
     }
-    std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, ov::genai::GenerationFinishReason::STOP);
+    ov::genai::GenerationFinishReason finishReason = ov::genai::GenerationFinishReason::STOP;
+    if (!legacyExecutionContext->results.finish_reasons.empty()) {
+        finishReason = legacyExecutionContext->results.finish_reasons[0];
+    }
+    std::string serializedChunk = executionContext->apiHandler->serializeStreamingChunk(lastTextChunk, finishReason);
     if (!serializedChunk.empty()) {
         executionContext->response = wrapTextInServerSideEventMessage(serializedChunk);
     }
diff --git a/src/test/llm/llmnode_test.cpp b/src/test/llm/llmnode_test.cpp
index 392717737d..304a1df25d 100644
--- a/src/test/llm/llmnode_test.cpp
+++ b/src/test/llm/llmnode_test.cpp
@@ -2685,7 +2685,7 @@ INSTANTIATE_TEST_SUITE_P(
     ::testing::Values(
         // params: model name, generate expected output, check logprobs, check finish reason, test speculative decoding, supports empty handshake msg
         TestParameters{"lm_cb_regular", true, true, true, false, true},
-        TestParameters{"lm_legacy_regular", false, false, false, false, false},
+        TestParameters{"lm_legacy_regular", false, false, true, false, false},
         TestParameters{"vlm_cb_regular", false, true, true, false, true},
         TestParameters{"vlm_legacy_regular", false, false, false, false, false}));
 
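
Note (not part of the patch): the same reduction over results.finish_reasons is repeated three times above, once in the VLM unary path of openai_completions.cpp and twice in openai_responses.cpp. The sketch below is a minimal standalone illustration of that logic; FinishReason is a local stand-in for ov::genai::GenerationFinishReason, used only so the example compiles without the GenAI headers.

#include <iostream>
#include <vector>

// Local stand-in for ov::genai::GenerationFinishReason, only to keep this
// sketch self-contained.
enum class FinishReason { NONE, STOP, LENGTH };

// Collapse per-sequence finish reasons into one response-level reason:
// report LENGTH if any sequence stopped because it hit the token limit,
// otherwise report STOP.
FinishReason reduceFinishReasons(const std::vector<FinishReason>& reasons) {
    for (const auto& reason : reasons) {
        if (reason == FinishReason::LENGTH) {
            return FinishReason::LENGTH;
        }
    }
    return FinishReason::STOP;
}

int main() {
    std::vector<FinishReason> reasons{FinishReason::STOP, FinishReason::LENGTH};
    std::cout << (reduceFinishReasons(reasons) == FinishReason::LENGTH ? "length" : "stop") << '\n';
    return 0;
}

Factoring the loop into a shared helper like this would avoid the repetition; the patch keeps the loops inline at each call site.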