Tool integration patterns

Every pattern below uses the mandatory prompt= argument (see the migration notes). Swap the LLM class (BaseOpenAI, BaseGemini, BaseGroq with async_mode=True, or BaseOllama with async_mode=True) and the model name to match your provider; see Providers, Groq provider, and Ollama provider.

Pattern 1: @tool decorator + file tools

The most common pattern — custom logic via decorators, file access via built-ins:

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq.tools.decorators import tool
from nucleusiq.tools.builtin import FileReadTool, FileSearchTool
from nucleusiq_openai import BaseOpenAI

@tool
def summarize(text: str) -> str:
    """Summarize the given text."""
    return text[:200] + "..."  # demo stub: naive truncation, not a real summary

agent = Agent(
    name="file-tools-demo",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseOpenAI(model_name="gpt-4.1-mini"),
    tools=[
        summarize,
        FileReadTool(workspace_root="./workspace"),
        FileSearchTool(workspace_root="./workspace"),
    ],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD),
)
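
A minimal run sketch, assuming the Task/initialize/execute flow shown in Pattern 7 carries over to OpenAI-backed agents (the task id and objective are illustrative):

import asyncio

from nucleusiq.agents.task import Task

async def main():
    await agent.initialize()  # strictly required only for BaseOllama (see Pattern 7); shown for parity
    result = await agent.execute(
        Task(id="t1", objective="Read notes.txt from the workspace and summarize it.")
    )
    print(result.output)

asyncio.run(main())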

Pattern 2: OpenAI native tools

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq_openai import BaseOpenAI, OpenAITool

agent = Agent(
    name="openai-native-tools",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseOpenAI(model_name="gpt-4.1-mini"),
    tools=[
        OpenAITool.web_search(),
        OpenAITool.code_interpreter(),
    ],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD),
)

Pattern 3: Gemini native tools

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq_gemini import BaseGemini, GeminiTool

agent = Agent(
    name="gemini-native-tools",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseGemini(model_name="gemini-2.5-flash"),
    tools=[
        GeminiTool.google_search(),
        GeminiTool.code_execution(),
        GeminiTool.url_context(),
        GeminiTool.google_maps(),
    ],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD),
)

Pattern 4: Gemini native + custom tool mixing

New in v0.7.5

Combine Gemini's native tools with your custom tools in a single agent. NucleusIQ's proxy pattern handles the Gemini API restriction transparently:

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq_gemini import BaseGemini, GeminiTool
from nucleusiq.tools.decorators import tool

@tool
def unit_converter(value: float, from_unit: str, to_unit: str) -> str:
    """Convert between units (km/miles, celsius/fahrenheit, kg/pounds)."""
    conversions = {
        ("celsius", "fahrenheit"): lambda v: v * 9 / 5 + 32,
        ("km", "miles"): lambda v: v * 0.621371,
        ("kg", "pounds"): lambda v: v * 2.20462,
    }
    # Unsupported unit pairs fall back to the input value unchanged.
    result = conversions.get((from_unit, to_unit), lambda v: v)(value)
    return f"{value} {from_unit} = {result:.2f} {to_unit}"

agent = Agent(
    name="gemini-mixed-tools",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseGemini(model_name="gemini-2.5-flash"),
    tools=[
        GeminiTool.google_search(),   # Native — searches the web
        GeminiTool.code_execution(),  # Native — runs Python code
        unit_converter,               # Custom — your business logic
    ],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD, enable_tracing=True),
)

Note

On Gemini 2.5 models, mixing native and custom tools would normally produce a 400 INVALID_ARGUMENT error. NucleusIQ handles this automatically — see the Gemini provider guide for details.
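
A compact run sketch for the mixed-tools agent above, under the same assumptions as Pattern 7's async flow; the objective is illustrative and assumes the model chooses to chain google_search with unit_converter:

import asyncio

from nucleusiq.agents.task import Task

async def main():
    await agent.initialize()
    task = Task(
        id="t1",
        objective="Find the distance from Paris to Berlin in km, then convert it to miles.",
    )
    result = await agent.execute(task)
    print(result.output)

asyncio.run(main())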

Pattern 5: Mixed tools with guardrails

Combine custom tools, native tools, and plugins for production safety:

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq.tools.decorators import tool
from nucleusiq.tools.builtin import FileReadTool
from nucleusiq_gemini import BaseGemini, GeminiTool
from nucleusiq.plugins.builtin import ModelCallLimitPlugin, ToolGuardPlugin

@tool
def calculate(expression: str) -> str:
    """Evaluate a safe math expression (example only — validate in production)."""
    # eval() with empty builtins is still not safe for untrusted input;
    # see the AST-based alternative after this example.
    return str(eval(expression, {"__builtins__": {}}, {}))

agent = Agent(
    name="guarded-mix",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseGemini(model_name="gemini-2.5-flash"),
    tools=[
        calculate,
        FileReadTool(workspace_root="./workspace"),
        GeminiTool.google_search(),
    ],
    plugins=[
        # Cap the number of model calls.
        ModelCallLimitPlugin(max_calls=10),
        # Allow-list of tool names the agent may invoke.
        ToolGuardPlugin(allowed_tools=["calculate", "file_read", "google_search"]),
    ],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD),
)
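
If the eval-based calculate above is a concern, a safer drop-in is an AST walker restricted to arithmetic nodes. A minimal standard-library sketch (the names _OPS, _eval_node, and safe_calculate are illustrative; it reuses the @tool decorator imported above):

import ast
import operator

# Whitelisted arithmetic operators; anything else is rejected.
_OPS = {
    ast.Add: operator.add,
    ast.Sub: operator.sub,
    ast.Mult: operator.mul,
    ast.Div: operator.truediv,
    ast.USub: operator.neg,
}

def _eval_node(node):
    if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
        return node.value
    if isinstance(node, ast.BinOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_eval_node(node.left), _eval_node(node.right))
    if isinstance(node, ast.UnaryOp) and type(node.op) in _OPS:
        return _OPS[type(node.op)](_eval_node(node.operand))
    raise ValueError("Unsupported expression")

@tool
def safe_calculate(expression: str) -> str:
    """Evaluate a basic arithmetic expression without eval()."""
    return str(_eval_node(ast.parse(expression, mode="eval").body))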

Pattern 6: MCP integration (OpenAI)

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq_openai import BaseOpenAI, OpenAITool
from nucleusiq.plugins.builtin import ModelCallLimitPlugin

agent = Agent(
    name="mcp-agent",
    prompt=ZeroShotPrompt().configure(system="You are a helpful assistant."),
    llm=BaseOpenAI(model_name="gpt-4.1-mini"),
    tools=[
        OpenAITool.mcp(
            server_label="my-mcp",
            server_description="Custom MCP server",
            server_url="https://my-server.example.com/sse",
        ),
    ],
    plugins=[ModelCallLimitPlugin(max_calls=10)],
    config=AgentConfig(execution_mode=ExecutionMode.STANDARD),
)

Pattern 7: @tool + local Ollama (alpha)

For local inference, @tool workflows match other providers; call await agent.initialize() before execute() when using BaseOllama.

import asyncio

from nucleusiq.agents import Agent
from nucleusiq.agents.config import AgentConfig, ExecutionMode
from nucleusiq.agents.task import Task
from nucleusiq.prompts.zero_shot import ZeroShotPrompt
from nucleusiq.tools.decorators import tool
from nucleusiq_ollama import BaseOllama, OllamaLLMParams


@tool
def greet(name: str) -> str:
    """Return a short greeting."""
    return f"Hello, {name}!"


async def main():
    agent = Agent(
        name="ollama-tools-demo",
        prompt=ZeroShotPrompt().configure(system="Use tools when helpful."),
        llm=BaseOllama(model_name="llama3.2", async_mode=True),
        tools=[greet],
        config=AgentConfig(
            execution_mode=ExecutionMode.STANDARD,
            llm_params=OllamaLLMParams(temperature=0.3, max_output_tokens=256),
        ),
    )
    await agent.initialize()
    r = await agent.execute(Task(id="t1", objective="Use the tool to greet Ada."))
    print(r.output)


asyncio.run(main())

Alpha package

nucleusiq-ollama is alpha; it requires nucleusiq>=0.7.10, so pin accordingly. Provider-native tools (OpenAITool, GeminiTool) do not apply here; use @tool or see the Ollama provider matrix.

See also