Skip to content

Commit e9e47ca

Browse files
cristipufu and claude committed
fix: use runtime factory for eval entrypoint discovery and show usage help
Replace hardcoded uipath.json-only entrypoint discovery with runtime_factory.discover_entrypoints() which supports all runtimes (langgraph, llama_index, etc). When auto-discovery fails (multiple entrypoints or eval sets), show a clean usage message listing available options instead of a ValueError traceback. Co-Authored-By: Claude Opus 4.6 <[email protected]>
1 parent e262dcb commit e9e47ca

File tree

7 files changed

+495
-157
lines changed

7 files changed

+495
-157
lines changed

pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[project]
22
name = "uipath"
3-
version = "2.10.3"
3+
version = "2.10.4"
44
description = "Python SDK and CLI for UiPath Platform, enabling programmatic interaction with automation services, process management, and deployment tools."
55
readme = { file = "README.md", content-type = "text/markdown" }
66
requires-python = ">=3.11"

src/uipath/_cli/cli_eval.py

Lines changed: 135 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
import logging
44
import os
55
import uuid
6+
from pathlib import Path
67
from typing import Any
78

89
import click
@@ -15,8 +16,7 @@
1516
from uipath._cli.middlewares import Middlewares
1617
from uipath.core.events import EventBus
1718
from uipath.core.tracing import UiPathTraceManager
18-
from uipath.eval._helpers import auto_discover_entrypoint
19-
from uipath.eval.helpers import EvalHelpers
19+
from uipath.eval.helpers import EVAL_SETS_DIRECTORY_NAME, EvalHelpers
2020
from uipath.eval.models.evaluation_set import EvaluationSet
2121
from uipath.eval.runtime import UiPathEvalContext, evaluate
2222
from uipath.platform.chat import set_llm_concurrency
@@ -135,6 +135,55 @@ def _resolve_model_settings_override(
135135
return override if override else None
136136

137137

138+
class _EvalDiscoveryError(Exception):
139+
"""Raised when auto-discovery of entrypoint or eval set fails."""
140+
141+
def __init__(self, entrypoints: list[str], eval_sets: list[Path]):
142+
self.entrypoints = entrypoints
143+
self.eval_sets = eval_sets
144+
145+
146+
def _discover_eval_sets() -> list[Path]:
147+
"""Discover available eval set files."""
148+
eval_sets_dir = Path(EVAL_SETS_DIRECTORY_NAME)
149+
if eval_sets_dir.exists():
150+
return sorted(eval_sets_dir.glob("*.json"))
151+
return []
152+
153+
154+
def _show_eval_usage_help(entrypoints: list[str], eval_set_files: list[Path]) -> None:
155+
"""Show available entrypoints and eval sets with usage examples."""
156+
lines: list[str] = []
157+
158+
if entrypoints:
159+
lines.append("Available entrypoints:")
160+
for name in entrypoints:
161+
lines.append(f" - {name}")
162+
else:
163+
lines.append(
164+
"No entrypoints found. "
165+
"Add a 'functions' or 'agents' section to your config file "
166+
"(e.g. uipath.json, langgraph.json)."
167+
)
168+
169+
if eval_set_files:
170+
lines.append("\nAvailable eval sets:")
171+
for f in eval_set_files:
172+
lines.append(f" - {f}")
173+
else:
174+
lines.append(
175+
f"\nNo eval sets found in '{EVAL_SETS_DIRECTORY_NAME}/' directory."
176+
)
177+
178+
lines.append("\nUsage: uipath eval <entrypoint> <eval_set>")
179+
if entrypoints and eval_set_files:
180+
ep_name = entrypoints[0]
181+
es_path = eval_set_files[0]
182+
lines.append(f"Example: uipath eval {ep_name} {es_path}")
183+
184+
click.echo("\n".join(lines))
185+
186+
138187
@click.command()
139188
@click.argument("entrypoint", required=False)
140189
@click.argument("eval_set", required=False)
@@ -266,16 +315,9 @@ def eval(
266315

267316
if result.should_continue:
268317
eval_context = UiPathEvalContext()
269-
270-
eval_context.entrypoint = entrypoint or auto_discover_entrypoint()
271318
eval_context.workers = workers
272319
eval_context.eval_set_run_id = eval_set_run_id
273320
eval_context.enable_mocker_cache = enable_mocker_cache
274-
275-
# Load eval set to resolve the path
276-
eval_set_path = eval_set or EvalHelpers.auto_discover_eval_set()
277-
_, resolved_eval_set_path = EvalHelpers.load_eval_set(eval_set_path, eval_ids)
278-
279321
eval_context.report_coverage = report_coverage
280322
eval_context.input_overrides = input_overrides
281323
eval_context.resume = resume
@@ -309,69 +351,103 @@ async def execute_eval():
309351
eval_context.job_id = ctx.job_id
310352

311353
runtime_factory = UiPathRuntimeFactoryRegistry.get(context=ctx)
312-
factory_settings = await runtime_factory.get_settings()
313-
trace_settings = (
314-
factory_settings.trace_settings if factory_settings else None
315-
)
316-
317-
if (
318-
ctx.job_id or should_register_progress_reporter
319-
) and UiPathConfig.is_tracing_enabled:
320-
# Live tracking for Orchestrator or Studio Web
321-
# Uses UIPATH_TRACE_ID from environment for trace correlation
322-
trace_manager.add_span_processor(
323-
LiveTrackingSpanProcessor(
324-
LlmOpsHttpExporter(),
325-
settings=trace_settings,
326-
)
354+
355+
try:
356+
# Auto-discover entrypoint and eval set using the runtime factory
357+
resolved_entrypoint = entrypoint
358+
eval_set_path = eval_set
359+
360+
available_entrypoints = runtime_factory.discover_entrypoints()
361+
available_eval_sets = _discover_eval_sets()
362+
363+
if not resolved_entrypoint:
364+
if len(available_entrypoints) == 1:
365+
resolved_entrypoint = available_entrypoints[0]
366+
else:
367+
raise _EvalDiscoveryError(
368+
available_entrypoints, available_eval_sets
369+
)
370+
371+
if not eval_set_path:
372+
if len(available_eval_sets) == 1:
373+
eval_set_path = str(available_eval_sets[0])
374+
else:
375+
raise _EvalDiscoveryError(
376+
available_entrypoints, available_eval_sets
377+
)
378+
379+
eval_context.entrypoint = resolved_entrypoint
380+
381+
# Load eval set to resolve the path
382+
_, resolved_eval_set_path = EvalHelpers.load_eval_set(
383+
eval_set_path, eval_ids
327384
)
328385

329-
if trace_file:
386+
factory_settings = await runtime_factory.get_settings()
330387
trace_settings = (
331388
factory_settings.trace_settings
332389
if factory_settings
333390
else None
334391
)
335-
trace_manager.add_span_exporter(
336-
JsonLinesFileExporter(trace_file), settings=trace_settings
337-
)
338392

339-
project_id = UiPathConfig.project_id
393+
if (
394+
ctx.job_id or should_register_progress_reporter
395+
) and UiPathConfig.is_tracing_enabled:
396+
# Live tracking for Orchestrator or Studio Web
397+
# Uses UIPATH_TRACE_ID from environment for trace correlation
398+
trace_manager.add_span_processor(
399+
LiveTrackingSpanProcessor(
400+
LlmOpsHttpExporter(),
401+
settings=trace_settings,
402+
)
403+
)
340404

341-
eval_context.execution_id = (
342-
eval_context.job_id
343-
or eval_context.eval_set_run_id
344-
or str(uuid.uuid4())
345-
)
405+
if trace_file:
406+
trace_settings = (
407+
factory_settings.trace_settings
408+
if factory_settings
409+
else None
410+
)
411+
trace_manager.add_span_exporter(
412+
JsonLinesFileExporter(trace_file),
413+
settings=trace_settings,
414+
)
346415

347-
# Load eval set (path is already resolved in cli_eval.py)
348-
eval_context.evaluation_set, _ = EvalHelpers.load_eval_set(
349-
resolved_eval_set_path, eval_ids
350-
)
416+
project_id = UiPathConfig.project_id
351417

352-
# Resolve model settings override from eval set
353-
settings_override = _resolve_model_settings_override(
354-
model_settings_id, eval_context.evaluation_set
355-
)
418+
eval_context.execution_id = (
419+
eval_context.job_id
420+
or eval_context.eval_set_run_id
421+
or str(uuid.uuid4())
422+
)
356423

357-
runtime = await runtime_factory.new_runtime(
358-
entrypoint=eval_context.entrypoint or "",
359-
runtime_id=eval_context.execution_id,
360-
settings=settings_override,
361-
)
424+
# Load eval set (path is already resolved in cli_eval.py)
425+
eval_context.evaluation_set, _ = EvalHelpers.load_eval_set(
426+
resolved_eval_set_path, eval_ids
427+
)
362428

363-
eval_context.runtime_schema = await runtime.get_schema()
429+
# Resolve model settings override from eval set
430+
settings_override = _resolve_model_settings_override(
431+
model_settings_id, eval_context.evaluation_set
432+
)
364433

365-
eval_context.evaluators = await EvalHelpers.load_evaluators(
366-
resolved_eval_set_path,
367-
eval_context.evaluation_set,
368-
_get_agent_model(eval_context.runtime_schema),
369-
)
434+
runtime = await runtime_factory.new_runtime(
435+
entrypoint=eval_context.entrypoint or "",
436+
runtime_id=eval_context.execution_id,
437+
settings=settings_override,
438+
)
370439

371-
# Runtime is not required anymore.
372-
await runtime.dispose()
440+
eval_context.runtime_schema = await runtime.get_schema()
441+
442+
eval_context.evaluators = await EvalHelpers.load_evaluators(
443+
resolved_eval_set_path,
444+
eval_context.evaluation_set,
445+
_get_agent_model(eval_context.runtime_schema),
446+
)
447+
448+
# Runtime is not required anymore.
449+
await runtime.dispose()
373450

374-
try:
375451
if project_id:
376452
studio_client = StudioClient(project_id)
377453

@@ -395,11 +471,12 @@ async def execute_eval():
395471
event_bus,
396472
)
397473
finally:
398-
if runtime_factory:
399-
await runtime_factory.dispose()
474+
await runtime_factory.dispose()
400475

401476
asyncio.run(execute_eval())
402477

478+
except _EvalDiscoveryError as e:
479+
_show_eval_usage_help(e.entrypoints, e.eval_sets)
403480
except Exception as e:
404481
console.error(
405482
f"Error occurred: {e or 'Execution failed'}", include_traceback=True
Lines changed: 1 addition & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1 @@
1-
from .helpers import auto_discover_entrypoint
2-
3-
__all__ = ["auto_discover_entrypoint"]
1+
"""Helper functions for evaluation process."""

src/uipath/eval/_helpers/helpers.py

Lines changed: 2 additions & 50 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,10 @@
1+
"""Helper functions for evaluation process."""
2+
13
import functools
2-
import json
3-
import os
44
import time
55
from collections.abc import Callable
66
from typing import Any
77

8-
import click
9-
108
from ..models import ErrorEvaluationResult, EvaluationResult
119

1210

@@ -37,52 +35,6 @@ def is_empty_value(value: Any) -> bool:
3735
return False
3836

3937

40-
def auto_discover_entrypoint() -> str:
41-
"""Auto-discover entrypoint from config file.
42-
43-
Returns:
44-
Entrypoint name (key from the functions dict)
45-
46-
Raises:
47-
ValueError: If no entrypoint found or multiple entrypoints exist
48-
"""
49-
from uipath._cli._utils._console import ConsoleLogger
50-
from uipath._utils.constants import UIPATH_CONFIG_FILE
51-
52-
console = ConsoleLogger()
53-
54-
if not os.path.isfile(UIPATH_CONFIG_FILE):
55-
raise ValueError(
56-
f"File '{UIPATH_CONFIG_FILE}' not found. Please run 'uipath init'."
57-
)
58-
59-
with open(UIPATH_CONFIG_FILE, "r", encoding="utf-8") as f:
60-
uipath_config = json.loads(f.read())
61-
62-
entrypoints: dict[str, str] = uipath_config.get("functions", {})
63-
64-
if not entrypoints:
65-
raise ValueError(
66-
f"No entrypoints found in {UIPATH_CONFIG_FILE}. "
67-
"Add a 'functions' section to uipath.json"
68-
)
69-
70-
if len(entrypoints) > 1:
71-
entrypoint_list = list(entrypoints.keys())
72-
raise ValueError(
73-
f"Multiple entrypoints found: {entrypoint_list}. "
74-
"Please specify which entrypoint to use."
75-
)
76-
77-
entrypoint_name = next(iter(entrypoints.keys()))
78-
entrypoint_path = entrypoints[entrypoint_name]
79-
console.info(
80-
f"Auto-discovered entrypoint: {click.style(entrypoint_name, fg='cyan')} "
81-
f"({entrypoint_path})"
82-
)
83-
return entrypoint_name
84-
85-
8638
def track_evaluation_metrics(func: Callable[..., Any]) -> Callable[..., Any]:
8739
"""Decorator to track evaluation metrics and handle errors gracefully."""
8840

src/uipath/eval/helpers.py

Lines changed: 0 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
from pathlib import Path
66
from typing import Any
77

8-
import click
98
from pydantic import ValidationError
109

1110
from uipath._cli._evals._conversational_utils import UiPathLegacyEvalChatMessagesMapper
@@ -46,49 +45,6 @@ def discriminate_eval_set(data: dict[str, Any]) -> EvaluationSet | LegacyEvaluat
4645
class EvalHelpers:
4746
"""Helper functions for evaluation commands, including loading and parsing evaluation sets and evaluators."""
4847

49-
@staticmethod
50-
def auto_discover_eval_set() -> str:
51-
"""Auto-discover evaluation set from {EVAL_SETS_DIRECTORY_NAME} directory.
52-
53-
Returns:
54-
Path to the evaluation set file
55-
56-
Raises:
57-
ValueError: If no eval set found or multiple eval sets exist
58-
"""
59-
eval_sets_dir = Path(EVAL_SETS_DIRECTORY_NAME)
60-
61-
if not eval_sets_dir.exists():
62-
raise ValueError(
63-
f"No '{EVAL_SETS_DIRECTORY_NAME}' directory found. "
64-
"Please set 'UIPATH_PROJECT_ID' env var and run 'uipath pull'."
65-
)
66-
67-
eval_set_files = list(eval_sets_dir.glob("*.json"))
68-
69-
if not eval_set_files:
70-
raise ValueError(
71-
f"No evaluation set files found in '{EVAL_SETS_DIRECTORY_NAME}' directory. "
72-
)
73-
74-
if len(eval_set_files) > 1:
75-
file_names = [f.name for f in eval_set_files]
76-
raise ValueError(
77-
f"Multiple evaluation sets found: {file_names}. "
78-
f"Please specify which evaluation set to use: 'uipath eval [entrypoint] <eval_set_path>'"
79-
)
80-
81-
eval_set_path = str(eval_set_files[0])
82-
logger.info(
83-
f"Auto-discovered evaluation set: {click.style(eval_set_path, fg='cyan')}"
84-
)
85-
86-
eval_set_path_obj = Path(eval_set_path)
87-
if not eval_set_path_obj.is_file() or eval_set_path_obj.suffix != ".json":
88-
raise ValueError("Evaluation set must be a JSON file")
89-
90-
return eval_set_path
91-
9248
@staticmethod
9349
def load_eval_set(
9450
eval_set_path: str, eval_ids: list[str] | None = None

0 commit comments

Comments (0)