from typing import Literal
from pydantic import BaseModel, Field, field_validator
# Prompt template for a step-by-step retrieval-support critic.
# Placeholders: {question} (the query being answered) and {context} (the
# retrieved document excerpt) — presumably filled via str.format by the
# caller; verify against usage site.
# The three decision labels listed here match the Literal values accepted
# by RetrievalCriticResult.decision below.
RETRIEVAL_CRITIC_PROMPT = """You are a senior researcher evaluating document support quality. Follow these steps:
1. Answer Requirements: Identify exact claims needed for complete answer
2. Claim Verification: Check document's direct evidence for each requirement
3. Evidence Strength: Assess quality/reliability of supporting facts
4. Completeness Check: Identify missing elements or partial coverage
5. Support Synthesis: Combine analysis into final support classification
**Question:** {question}
**Document Excerpt:** {context}
Provide detailed reasoning through all steps, then state final decision as either:
- "fully-supported" (covers all requirements with strong evidence)
- "partially-supported" (covers some requirements or weak evidence)
- "no-support" (contains no usable evidence)"""
class RetrievalCriticResult(BaseModel):
    """Structured evaluation of a document's ability to support answering a query.

    Validates that the reasoning chain contains all five required analytical
    step headers and that the final decision matches one of the predefined
    support classifications.

    Attributes:
        reasoning_chain (str): Step-by-step analysis through verification stages.
            Must contain all required section headers.
        decision (Literal["fully-supported", "partially-supported", "no-support"]):
            Final classification of document support quality.

    Raises:
        ValidationError: On model construction, if the reasoning chain is
            missing any required section header (the validator's ValueError
            is wrapped by pydantic) or if the decision value doesn't match
            one of the allowed literals.

    Example:
        >>> valid_result = RetrievalCriticResult(
        ...     reasoning_chain=(
        ...         "1. Answer Requirements: Needs 3 climate change impacts"
        ...         "2. Claim Verification: Documents sea level rise data"
        ...         "3. Evidence Strength: IPCC report citations provided"
        ...         "4. Completeness Check: Missing economic impact analysis"
        ...         "5. Support Synthesis: Covers 2/3 required impact areas"
        ...     ),
        ...     decision="partially-supported",
        ... )
        >>> valid_result.decision
        'partially-supported'
    """

    reasoning_chain: str = Field(
        ...,
        description=(
            "Systematic analysis through verification stages. Must contain:\n"
            "- 1. Answer Requirements: Identification of needed claims\n"
            "- 2. Claim Verification: Document evidence checking\n"
            "- 3. Evidence Strength: Quality assessment of sources\n"
            "- 4. Completeness Check: Missing elements analysis\n"
            "- 5. Support Synthesis: Final classification rationale"
        ),
    )
    decision: Literal["fully-supported", "partially-supported", "no-support"] = Field(
        ...,
        description=(
            "Final classification of document's support quality:\n"
            "- 'fully-supported': Comprehensive evidence for all requirements\n"
            "- 'partially-supported': Partial or weak evidence coverage\n"
            "- 'no-support': No usable evidence found"
        ),
    )

    @field_validator("reasoning_chain")
    @classmethod
    def validate_reasoning_steps(cls, chain_to_validate: str) -> str:
        """Validate that the reasoning chain contains every required step header.

        Args:
            chain_to_validate (str): The raw reasoning chain text to validate.

        Returns:
            str: The unchanged reasoning chain if all required headers are present.

        Raises:
            ValueError: If any required section headers are missing from the
                chain. During model construction pydantic reports this as a
                ValidationError.

        Example:
            >>> valid_chain = (
            ...     "1. Answer Requirements: Needs 5 economic indicators"
            ...     "2. Claim Verification: GDP data verified"
            ...     "3. Evidence Strength: Government reports cited"
            ...     "4. Completeness Check: Missing unemployment figures"
            ...     "5. Support Synthesis: Covers 4/5 required indicators"
            ... )
            >>> RetrievalCriticResult.validate_reasoning_steps(valid_chain) == valid_chain
            True
        """
        # Membership check only: headers may appear anywhere in the text and
        # in any order; no length or formatting constraints are enforced.
        required_steps = [
            "1. Answer Requirements",
            "2. Claim Verification",
            "3. Evidence Strength",
            "4. Completeness Check",
            "5. Support Synthesis",
        ]
        missing: list[str] = [
            step for step in required_steps if step not in chain_to_validate
        ]
        if missing:
            msg = f"Missing required analysis steps: {missing}"
            raise ValueError(msg)
        return chain_to_validate

    # Example payload surfaced in the generated JSON schema (pydantic v2
    # dict-style model_config).
    model_config = {
        "json_schema_extra": {
            "example": {
                "reasoning_chain": (
                    "1. Answer Requirements: Needs 3 main battery innovations\n"
                    "2. Claim Verification: Documents solid-state and lithium-air tech\n"
                    "3. Evidence Strength: Peer-reviewed study citations\n"
                    "4. Completeness Check: Missing third innovation details\n"
                    "5. Support Synthesis: Strong evidence for 2/3 requirements"
                ),
                "decision": "partially-supported",
            }
        }
    }
|