
PydanticAI Tutorial 2026: Build Type-Safe AI Agents in Python

What is PydanticAI?

PydanticAI is a Python framework for building AI agents with a FastAPI-inspired design. Unlike LangChain, it prioritizes:

  • Type safety: Pydantic models everywhere, full IDE autocomplete
  • Testability: test agents without calling real LLMs
  • Dependency injection: share database connections, config across tools
  • Simplicity: minimal abstractions, easy to understand
Install it with pip:

pip install pydantic-ai

Your First Agent

from pydantic_ai import Agent

# Minimal setup: a model name plus a system prompt is all an agent needs.
agent = Agent(
    "claude-opus-4-5",
    system_prompt="You are a helpful Python tutor. Keep answers concise.",
)

response = agent.run_sync("What is a list comprehension?")
print(response.data)  # plain-text answer from the model

Structured Results with Pydantic

from pydantic import BaseModel
from pydantic_ai import Agent


class CodeReview(BaseModel):
    """Structured verdict the reviewer agent must return."""

    has_bugs: bool
    issues: list[str]
    score: int  # 1-10
    suggestion: str


# result_type makes the model's answer validate against CodeReview.
agent = Agent(
    "claude-opus-4-5",
    result_type=CodeReview,
    system_prompt="You are a code reviewer. Analyze Python code and return structured feedback.",
)

code_sample = """
Review this code:
def divide(a, b):
    return a / b
"""
response = agent.run_sync(code_sample)

feedback = response.data  # Type: CodeReview
print(f"Has bugs: {feedback.has_bugs}")
print(f"Score: {feedback.score}/10")
print(f"Issues: {feedback.issues}")

Tools: Give Agents Capabilities

from pydantic_ai import Agent, RunContext
import httpx

agent = Agent(
    "claude-opus-4-5",
    system_prompt="Answer questions using the provided tools."
)

@agent.tool_plain
def get_python_version() -> str:
    """Get the current Python version."""
    import sys
    return f"Python {sys.version}"

@agent.tool_plain
def search_pypi(package_name: str) -> str:
    """Search for a Python package on PyPI.

    Returns a one-line summary on success, or a human-readable error
    string the model can relay back to the user.
    """
    # Explicit timeout so a slow PyPI response can't stall the agent run.
    r = httpx.get(f"https://pypi.org/pypi/{package_name}/json", timeout=10.0)
    if r.status_code == 404:
        return f"Package '{package_name}' not found"
    if r.status_code != 200:
        # Outages / rate limits: report instead of calling r.json() on a
        # non-JSON error page and crashing the tool.
        return f"PyPI lookup for '{package_name}' failed (HTTP {r.status_code})"
    data = r.json()
    info = data["info"]
    return f"{info['name']} {info['version']}: {info['summary']}"

result = agent.run_sync("What version of Python am I using? Also, what does the requests package do?")
print(result.data)

Dependency Injection

Share state (database connections, config) across all tools:

from dataclasses import dataclass
from pydantic_ai import Agent, RunContext
import psycopg2

@dataclass
class DatabaseDeps:
    """State shared with every tool call via dependency injection."""
    conn: psycopg2.extensions.connection  # open database connection
    user_id: int  # the user the agent acts on behalf of

agent = Agent(
    "claude-opus-4-5",
    deps_type=DatabaseDeps,
    system_prompt="You are a helpful assistant with access to the user database."
)

@agent.tool
async def get_user_info(ctx: RunContext[DatabaseDeps]) -> str:
    """Get information about the current user."""
    # `with` closes the cursor even if execute/fetch raises.
    with ctx.deps.conn.cursor() as cur:
        cur.execute("SELECT name, email FROM users WHERE id = %s", (ctx.deps.user_id,))
        user = cur.fetchone()
    if user is None:
        # fetchone() returns None when no row matches — don't crash on user[0].
        return f"No user found with id {ctx.deps.user_id}"
    return f"Name: {user[0]}, Email: {user[1]}"

@agent.tool
async def get_user_orders(ctx: RunContext[DatabaseDeps], limit: int = 5) -> str:
    """Get recent orders for the current user."""
    with ctx.deps.conn.cursor() as cur:
        # ORDER BY is required for "recent": LIMIT without it returns
        # arbitrary rows. Assumes order_id increases over time — swap in a
        # created_at column if the schema has one (TODO confirm schema).
        cur.execute(
            "SELECT order_id, total FROM orders WHERE user_id = %s "
            "ORDER BY order_id DESC LIMIT %s",
            (ctx.deps.user_id, limit)
        )
        orders = cur.fetchall()
    if not orders:
        return "No orders found."
    return "\n".join(f"Order {o[0]}: ${o[1]}" for o in orders)

# Use with real database
conn = psycopg2.connect("postgresql://...")
deps = DatabaseDeps(conn=conn, user_id=42)
result = agent.run_sync("What are my recent orders?", deps=deps)

Testing Without Calling the LLM

from pydantic_ai.models.test import TestModel
from pydantic_ai import Agent

agent = Agent("claude-opus-4-5", result_type=CodeReview)

# Swap in TestModel so the run needs no network call or API key.
with agent.override(model=TestModel()):
    review_result = agent.run_sync("Review this code")
    # TestModel fills the result type with default values — enough to
    # exercise tool and validation logic in unit tests.

Use with Local Ollama

from pydantic_ai import Agent
from pydantic_ai.models.openai import OpenAIModel
from openai import AsyncOpenAI

# Ollama speaks the OpenAI wire protocol, so the stock OpenAI client works;
# the api_key is required by the client but ignored by Ollama.
ollama_client = AsyncOpenAI(
    base_url="http://localhost:11434/v1",
    api_key="ollama"
)
local_model = OpenAIModel("llama3.2", openai_client=ollama_client)

agent = Agent(local_model, system_prompt="You are a helpful assistant.")
reply = agent.run_sync("Hello!")
print(reply.data)

Streaming Responses

import asyncio
from pydantic_ai import Agent

agent = Agent("claude-haiku-4-5")

async def main():
    """Print the model's answer incrementally as chunks stream in."""
    prompt = "Explain Python generators step by step"
    async with agent.run_stream(prompt) as response:
        async for chunk in response.stream_text():
            print(chunk, end="", flush=True)

asyncio.run(main())

Multi-turn Conversation

agent = Agent("claude-opus-4-5")

# Feed the first run's messages back in so the model keeps conversational context.
first = agent.run_sync("My name is Alice")
second = agent.run_sync("What is my name?", message_history=first.new_messages())

print(second.data)  # "Your name is Alice"

Leonardo Lazzaro

Software engineer and technical writer. 10+ years experience in DevOps, Python, and Linux systems.

More articles by Leonardo Lazzaro