from typing import Literal

from pydantic import BaseModel, Field, field_validator

RETRIEVAL_EVALUATION_PROMPT = """You are a senior analyst evaluating document relevance. Follow these steps:

1. Question Core: Identify essential information needed
2. Document Facts: Extract concrete claims from text
3. Direct Overlap: Verify question-document concept matches
4. Indirect Support: Evaluate contextual relevance
5. Final Synthesis: Combine analysis into verdict

**Question:** {question}

**Document Excerpt:** {context}

Provide detailed reasoning through all steps, then state the final decision as 'relevant' or 'irrelevant'."""
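
# A rendering sketch with illustrative question/context values (not part of the
# original module): the template's placeholders are filled with str.format before
# the prompt is sent to whatever LLM client the surrounding pipeline uses, e.g.:
#
#     prompt = RETRIEVAL_EVALUATION_PROMPT.format(
#         question="What are cloud security best practices?",
#         context="The excerpt details AWS IAM role configurations.",
#     )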


class RetrievalEvaluationResult(BaseModel):
    """Pydantic model for validating structured relevance evaluation results.

    Enforces a complete reasoning chain and a constrained decision output through
    validation rules. Designed for integration with LLM structured-output pipelines.

    Attributes:
        reasoning_chain (str): Sequential analysis following required evaluation steps.
            Must contain all required section headers.
        decision (Literal["relevant", "irrelevant"]): Final relevance determination.

    Raises:
        ValidationError: If the reasoning chain is missing any required section header.

    Examples:
        >>> valid_instance = RetrievalEvaluationResult(
        ...     reasoning_chain=(
        ...         "1. Question Core: Cloud security practices\n"
        ...         "2. Document Facts: AWS IAM details\n"
        ...         "3. Direct Overlap: Security focus match\n"
        ...         "4. Indirect Support: Implementation examples\n"
        ...         "5. Final Synthesis: Directly addresses question"
        ...     ),
        ...     decision="relevant",
        ... )
        >>> isinstance(valid_instance, RetrievalEvaluationResult)
        True
    """

    reasoning_chain: str = Field(
        ...,
        description="Sequential analysis through the required evaluation stages. Must contain the sections: "
        "1. Question Core, 2. Document Facts, 3. Direct Overlap, 4. Indirect Support, 5. Final Synthesis.",
    )
    decision: Literal["relevant", "irrelevant"] = Field(
        ...,
        description="Binary relevance determination based on structured analysis of document content against "
        "query requirements.",
    )

    @field_validator("reasoning_chain")
    @classmethod
    def validate_reasoning_steps(cls, chain_to_validate: str) -> str:
        r"""Validate reasoning chain contains all required analysis sections.

        Args:
            chain_to_validate (str): Input reasoning chain text to validate

        Returns:
            str: Validated reasoning chain text if all sections present

        Raises:
            ValueError: If any required section headers are missing from the text

        Example:
            >>> valid_chain = (
            ...     "1. Question Core: ... 2. Document Facts: ... 3. Direct Overlap: ...4. Indirect Support: ..."
            ...     "5. Final Synthesis: ..."
            ... )
            >>> RetrievalEvaluator.validate_reasoning_steps(valid_chain)
            '1. Question Core: ... 2. Document Facts: ... 3. Direct Overlap: ... 4. Indirect Support: ...
            5. Final Synthesis: ...'
        """
        required_steps = [
            "1. Question Core",
            "2. Document Facts",
            "3. Direct Overlap",
            "4. Indirect Support",
            "5. Final Synthesis",
        ]

        missing: list[str] = [step for step in required_steps if step not in chain_to_validate]
        if missing:
            msg = f"Missing required analysis steps: {missing}"
            raise ValueError(msg)
        return chain_to_validate

    model_config = {
        "json_schema_extra": {
            "example": {
                "reasoning_chain": (
                    "1. Question Core: Requires cloud security best practices\n"
                    "2. Document Facts: Details AWS IAM role configurations\n"
                    "3. Direct Overlap: Matches cloud security focus\n"
                    "4. Indirect Support: Provides implementation examples\n"
                    "5. Final Synthesis: Directly addresses core security question"
                ),
                "decision": "relevant",
            }
        }
    }
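

# A minimal end-to-end validation sketch, assuming the structured-output pipeline
# returns the model's reply as a JSON string matching the schema above. The reply
# below uses illustrative values, not real LLM output.
if __name__ == "__main__":
    import json

    sample_reply = json.dumps(
        {
            "reasoning_chain": (
                "1. Question Core: Cloud security practices\n"
                "2. Document Facts: AWS IAM role details\n"
                "3. Direct Overlap: Security focus match\n"
                "4. Indirect Support: Implementation examples\n"
                "5. Final Synthesis: Directly addresses the question"
            ),
            "decision": "relevant",
        }
    )
    # model_validate_json runs validate_reasoning_steps, so a reply missing any
    # required section header raises a pydantic ValidationError here.
    result = RetrievalEvaluationResult.model_validate_json(sample_reply)
    print(result.decision)  # relevant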