Skip to content

pydantic_ai.models.test

Utility model for quickly testing apps built with PydanticAI.

TestModel dataclass

Bases: Model

A model specifically for testing purposes.

This will (by default) call all tools in the agent, then return a tool response if possible, otherwise a plain response.

How useful this model is will vary significantly.

Apart from __init__ derived by the dataclass decorator, all methods are private or match those of the base class.

Source code in pydantic_ai_slim/pydantic_ai/models/test.py
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
@dataclass
class TestModel(Model):
    """A model specifically for testing purposes.

    This will (by default) call all tools in the agent, then return a tool response if possible,
    otherwise a plain response.

    How useful this model is will vary significantly.

    Apart from `__init__` derived by the `dataclass` decorator, all methods are private or match those
    of the base class.
    """

    # NOTE: Avoid test discovery by pytest.
    __test__ = False

    call_tools: list[str] | Literal['all'] = 'all'
    """List of tools to call. If `'all'`, all tools will be called."""
    custom_result_text: str | None = None
    """If set, this text is returned as the final result."""
    custom_result_args: Any | None = None
    """If set, these args will be passed to the result tool."""
    seed: int = 0
    """Seed for generating random data."""
    # The three fields below are populated when the agent calls `agent_model`,
    # so tests can inspect what configuration the agent handed the model.
    agent_model_tools: Mapping[str, AbstractToolDefinition] | None = field(default=None, init=False)
    agent_model_allow_text_result: bool | None = field(default=None, init=False)
    agent_model_result_tools: list[AbstractToolDefinition] | None = field(default=None, init=False)

    async def agent_model(
        self,
        function_tools: Mapping[str, AbstractToolDefinition],
        allow_text_result: bool,
        result_tools: Sequence[AbstractToolDefinition] | None,
    ) -> AgentModel:
        """Record the agent's tool configuration and build a `TestAgentModel` from it."""
        self.agent_model_tools = function_tools
        self.agent_model_allow_text_result = allow_text_result
        self.agent_model_result_tools = None if result_tools is None else list(result_tools)

        # Decide which function tools the test model will invoke on its first step.
        if self.call_tools == 'all':
            selected = list(function_tools.values())
        else:
            selected = [function_tools[name] for name in self.call_tools]
        tool_calls = [(tool.name, tool) for tool in selected]

        # `left` carries a plain-text result, `right` a structured (result-tool) one.
        result: _utils.Either[str | None, Any | None]
        if self.custom_result_text is not None:
            assert allow_text_result, 'Plain response not allowed, but `custom_result_text` is set.'
            assert self.custom_result_args is None, 'Cannot set both `custom_result_text` and `custom_result_args`.'
            result = _utils.Either(left=self.custom_result_text)
        elif self.custom_result_args is not None:
            assert result_tools is not None, 'No result tools provided, but `custom_result_args` is set.'
            first_result_tool = result_tools[0]
            if key := first_result_tool.outer_typed_dict_key:
                # Wrap the custom args under the result tool's outer TypedDict key.
                result = _utils.Either(right={key: self.custom_result_args})
            else:
                result = _utils.Either(right=self.custom_result_args)
        elif not allow_text_result and result_tools is not None:
            # Text is disallowed and a result tool exists: produce a structured result.
            result = _utils.Either(right=None)
        else:
            # Either text is allowed, or there is nothing else to fall back to.
            result = _utils.Either(left=None)
        return TestAgentModel(tool_calls, result, self.agent_model_result_tools, self.seed)

    def name(self) -> str:
        return 'test-model'

call_tools class-attribute instance-attribute

call_tools: list[str] | Literal['all'] = 'all'

List of tools to call. If 'all', all tools will be called.

custom_result_text class-attribute instance-attribute

custom_result_text: str | None = None

If set, this text is returned as the final result.

custom_result_args class-attribute instance-attribute

custom_result_args: Any | None = None

If set, these args will be passed to the result tool.

seed class-attribute instance-attribute

seed: int = 0

Seed for generating random data.

TestAgentModel dataclass

Bases: AgentModel

Implementation of AgentModel for testing purposes.

Source code in pydantic_ai_slim/pydantic_ai/models/test.py
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
@dataclass
class TestAgentModel(AgentModel):
    """Implementation of `AgentModel` for testing purposes."""

    # NOTE: Avoid test discovery by pytest.
    __test__ = False

    tool_calls: list[tuple[str, AbstractToolDefinition]]
    # left: the final result is plain text; right: it's a (structured) tool call
    result: _utils.Either[str | None, Any | None]
    result_tools: list[AbstractToolDefinition] | None
    seed: int
    step: int = 0
    last_message_count: int = 0

    async def request(self, messages: list[Message]) -> tuple[ModelAnyResponse, Cost]:
        return self._request(messages), Cost()

    @asynccontextmanager
    async def request_stream(self, messages: list[Message]) -> AsyncIterator[EitherStreamedResponse]:
        """Wrap the canned response in the matching streamed-response type."""
        response = self._request(messages)
        cost = Cost()
        if isinstance(response, ModelTextResponse):
            yield TestStreamTextResponse(response.content, cost)
        else:
            yield TestStreamStructuredResponse(response, cost)

    def gen_tool_args(self, tool_def: AbstractToolDefinition) -> Any:
        """Generate deterministic fake arguments matching the tool's JSON schema."""
        return _JsonSchemaTestData(tool_def.json_schema, self.seed).generate()

    def _request(self, messages: list[Message]) -> ModelAnyResponse:
        """Produce the next canned response, tracking conversation progress in `step`."""
        # First step: invoke every configured tool at once.
        if self.step == 0 and self.tool_calls:
            self.step += 1
            self.last_message_count = len(messages)
            return ModelStructuredResponse(
                calls=[ToolCall.from_dict(name, self.gen_tool_args(tool)) for name, tool in self.tool_calls]
            )

        # Only look at messages that arrived since our previous response.
        new_messages = messages[self.last_message_count :]
        self.last_message_count = len(messages)
        retried = {m.tool_name for m in new_messages if isinstance(m, RetryPrompt)}
        if retried:
            # Re-issue just the calls whose tools asked for a retry.
            self.step += 1
            return ModelStructuredResponse(
                calls=[
                    ToolCall.from_dict(name, self.gen_tool_args(tool))
                    for name, tool in self.tool_calls
                    if name in retried
                ]
            )

        if response_text := self.result.left:
            self.step += 1
            if response_text.value is not None:
                return ModelTextResponse(content=response_text.value)
            # No custom text: summarize any tool returns as JSON, else a stock message.
            output = {m.tool_name: m.content for m in messages if isinstance(m, ToolReturn)}
            if output:
                return ModelTextResponse(content=pydantic_core.to_json(output).decode())
            return ModelTextResponse(content='success (no tool calls)')

        assert self.result_tools is not None, 'No result tools provided'
        # Pick a result tool deterministically from the seed.
        result_tool = self.result_tools[self.seed % len(self.result_tools)]
        custom_args = self.result.right
        self.step += 1
        if custom_args is not None:
            return ModelStructuredResponse(calls=[ToolCall.from_dict(result_tool.name, custom_args)])
        return ModelStructuredResponse(calls=[ToolCall.from_dict(result_tool.name, self.gen_tool_args(result_tool))])