41 changes: 30 additions & 11 deletions src/uipath/_cli/_evals/mocks/llm_mocker.py
@@ -10,6 +10,7 @@
     LLMMockingStrategy,
     MockingContext,
 )
+from uipath.platform.errors import EnrichedException
 from uipath.tracing import traced
 from uipath.tracing._utils import _SpanUtils

@@ -92,7 +93,6 @@ async def response(
         function_name = params.get("name") or func.__name__
         if function_name in [x.name for x in self.context.strategy.tools_to_simulate]:
             from uipath.platform import UiPath
-            from uipath.platform.chat._llm_gateway_service import _cleanup_schema
 
             from .mocks import (
                 cache_manager_context,
@@ -110,6 +110,8 @@ async def response(
             if return_type is None:
                 return_type = Any
 
+            from uipath.platform.chat._llm_gateway_service import _cleanup_schema
+
             output_schema = params.get(
                 "output_schema", TypeAdapter(return_type).json_schema()
             )
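
Aside: the hunk above moves the _cleanup_schema import from the top of the branch down to its point of use. This is the ordinary deferred-import pattern — the dependency is resolved at call time rather than at module import time, typically to break an import cycle or keep a slow import off cold paths; the diff itself doesn't state the motivation. A generic sketch, with pydantic standing in for the deferred module:

def build_output_schema(return_type):
    # Deferred import: resolved only when this function actually runs,
    # so an import cycle (or a slow module load) never trips at startup.
    from pydantic import TypeAdapter

    return TypeAdapter(return_type).json_schema()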
@@ -195,16 +197,33 @@ async def response(
             if cached_response is not None:
                 return cached_response
 
-            response = await llm.chat_completions(
-                [
-                    {
-                        "role": "user",
-                        "content": formatted_prompt,
-                    },
-                ],
-                response_format=response_format,
-                **completion_kwargs,
-            )
+            messages = [
+                {
+                    "role": "user",
+                    "content": formatted_prompt,
+                },
+            ]
+
+            try:
+                response = await llm.chat_completions(
+                    messages,
+                    response_format=response_format,
+                    **completion_kwargs,
+                )
+            except EnrichedException as e:
+                if e.status_code in (400, 422):
+                    logger.warning(
+                        "json_schema response_format not supported by model, "
+                        "falling back to json_object"
+                    )
+                    response = await llm.chat_completions(
+                        messages,
+                        response_format={"type": "json_object"},
+                        **completion_kwargs,
+                    )
+                else:
+                    raise
+
             result = json.loads(response.choices[0].message.content)
 
             if cache_manager is not None:
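
For context, a minimal, self-contained sketch of the fallback behavior this hunk introduces. EnrichedException and chat_completions below are stand-ins shaped after the diff, not the real uipath APIs:

import asyncio
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)


class EnrichedException(Exception):
    """Stand-in for uipath.platform.errors.EnrichedException (assumed shape)."""

    def __init__(self, status_code: int, message: str = "") -> None:
        super().__init__(message)
        self.status_code = status_code


async def chat_completions(messages, response_format, **kwargs):
    """Hypothetical gateway call; pretends the model rejects json_schema."""
    if response_format.get("type") == "json_schema":
        raise EnrichedException(400, "response_format not supported")
    return {"choices": [{"message": {"content": "{}"}}]}


async def complete_with_fallback(messages, response_format, **kwargs):
    """Try the strict schema first; degrade to json_object on 400/422."""
    try:
        return await chat_completions(messages, response_format, **kwargs)
    except EnrichedException as e:
        if e.status_code in (400, 422):
            logger.warning(
                "json_schema response_format not supported by model, "
                "falling back to json_object"
            )
            return await chat_completions(
                messages, {"type": "json_object"}, **kwargs
            )
        raise


if __name__ == "__main__":
    msgs = [{"role": "user", "content": "Return an empty JSON object."}]
    print(asyncio.run(complete_with_fallback(msgs, {"type": "json_schema"})))

Note that {"type": "json_object"} only forces the model to emit valid JSON; unlike json_schema it does not constrain the output to the requested schema, so the json.loads(...) parse (and any downstream validation) in llm_mocker.py still has to cope with shape mismatches.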