Throw exception if LLM output is not parseable #45158
base: main
```diff
@@ -232,13 +232,9 @@ async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]: # t
                 f"{self._result_key}_sample_input": prompty_output_dict.get("sample_input", ""),
                 f"{self._result_key}_sample_output": prompty_output_dict.get("sample_output", ""),
             }
-        if logger:
-            logger.warning("LLM output is not a dictionary, returning NaN for the score.")
-
-        score = math.nan
-        binary_result = self._get_binary_result(score)
-        return {
-            self._result_key: float(score),
-            f"{self._result_key}_result": binary_result,
-            f"{self._result_key}_threshold": self._threshold,
-        }
+        raise EvaluationException(
+            message="Evaluator returned invalid output.",
+            blame=ErrorBlame.SYSTEM_ERROR,
+            category=ErrorCategory.FAILED_EXECUTION,
+            target=ErrorTarget.EVALUATE,
+        )
```
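With this change, code that previously received a NaN score for unparseable LLM output will see an EvaluationException instead. Below is a minimal sketch of how a caller might handle that, assuming the exception type is importable from azure.ai.evaluation._exceptions; the import path and the safe_evaluate wrapper are illustrative assumptions, not part of this PR.

```python
# Minimal sketch: the import path and the safe_evaluate wrapper are
# illustrative assumptions, not code from this PR.
from azure.ai.evaluation._exceptions import EvaluationException


def safe_evaluate(evaluator, **eval_input):
    """Call an evaluator and return None instead of propagating the
    exception now raised when the LLM output cannot be parsed."""
    try:
        return evaluator(**eval_input)
    except EvaluationException as exc:
        # Before this PR the evaluator returned a NaN score here;
        # after it, the failure surfaces as an exception.
        print(f"Evaluation failed: {exc}")
        return None
```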
Comment on lines +235 to +240
Suggested change:

```diff
-        raise EvaluationException(
-            message="Evaluator returned invalid output.",
-            blame=ErrorBlame.SYSTEM_ERROR,
-            category=ErrorCategory.FAILED_EXECUTION,
-            target=ErrorTarget.EVALUATE,
-        )
+        if logger:
+            logger.warning(
+                "LLM output is not a dictionary; returning NaN for the score and empty reason."
+            )
+        return {
+            f"{self._result_key}": math.nan,
+            f"{self._result_key}_reason": "",
+            f"{self._result_key}_result": "fail",
+            f"{self._result_key}_threshold": self._threshold,
+            f"{self._result_key}_prompt_tokens": prompty_output_dict.get("input_token_count", 0),
+            f"{self._result_key}_completion_tokens": prompty_output_dict.get("output_token_count", 0),
+            f"{self._result_key}_total_tokens": prompty_output_dict.get("total_token_count", 0),
+            f"{self._result_key}_finish_reason": prompty_output_dict.get("finish_reason", ""),
+            f"{self._result_key}_model": prompty_output_dict.get("model_id", ""),
+            f"{self._result_key}_sample_input": prompty_output_dict.get("sample_input", ""),
+            f"{self._result_key}_sample_output": prompty_output_dict.get("sample_output", ""),
+        }
```
This is a breaking change. The existing test at lines 230-244 in test_task_completion_evaluator.py expects this evaluator to return 0 when the LLM output is not a dictionary, but the new code raises an exception instead. The test needs to be updated to expect an EvaluationException.
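A sketch of how that test could be updated to match the new behavior, assuming pytest with pytest-asyncio and a hypothetical fixture that wires the evaluator to a mocked prompty flow returning a non-dictionary payload; the names below are illustrative and not taken from test_task_completion_evaluator.py.

```python
# Illustrative sketch only; the fixture and mocking details of the real
# test_task_completion_evaluator.py are not reproduced here.
import pytest

from azure.ai.evaluation._exceptions import EvaluationException  # assumed import path


@pytest.mark.asyncio
async def test_non_dict_llm_output_raises(evaluator_with_non_dict_output):
    # The hypothetical fixture returns an evaluator whose mocked flow
    # yields a bare string instead of a dictionary.
    with pytest.raises(EvaluationException):
        await evaluator_with_non_dict_output._do_eval(
            {"query": "What is 2 + 2?", "response": "4"}
        )
```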