Create a reusable chat object for interacting with Anthropic's Claude API.
The returned object provides $chat() for text responses and $chat_structured()
for structured data extraction. Supports extended thinking for complex reasoning.
Usage
claude_new(
  sys_prompt = NULL,
  ml = NULL,
  temp = 1,
  max_tokens = NULL,
  think_tokens = NULL,
  think_effort = NULL,
  interleaved = FALSE,
  tools = NULL,
  beta = character(),
  cache = "5m",
  echo = "none",
  effort = NULL,
  context_edit = NULL,
  token_efficient = FALSE,
  skills = NULL
)
Arguments
- sys_prompt
  Character. System prompt to set model behavior. Default NULL.
- ml
  Character. Model ID. Default from ART_CLAUDE_MODEL (Opus 4.5). Use claude_models() to see options.
- temp
  Numeric. Temperature (0-1). Lower = more deterministic, higher = more creative. Default 1. Note: must be 1 when using extended thinking.
- max_tokens
  Integer. Maximum output tokens. Default from ART_CLAUDE_MAX_TOKENS.
- think_tokens
  Integer. Max tokens for thinking (min 1024). Enables extended thinking, where Claude reasons step by step before responding. Mutually exclusive with think_effort. Default NULL (no extended thinking).
- think_effort
  Character. Reasoning effort level: "low", "medium", or "high". Alternative to think_tokens. Mutually exclusive with think_tokens. Default NULL.
- interleaved
  Logical. Enable interleaved thinking (think between tool calls). Automatically adds the required beta header. Default FALSE.
- tools
  List. Tools to register with the chat. Create with claude_tool(), or use built-in tools: claude_web_search(), claude_code_exec(). Default NULL.
- beta
  Character vector. Beta headers for experimental features. See beta_headers for available constants.
- cache
  Character. Prompt caching: "5m", "1h", or "none". Default "5m".
- echo
  Character. Output mode: "none", "output", or "all". Default "none".
- effort
  Character. Token efficiency level: "low", "medium", or "high". Only available for Opus 4.5. Reduces token consumption while maintaining quality. At "medium", Opus 4.5 matches Sonnet 4.5 performance with 76% fewer tokens. Default NULL (no effort control).
- context_edit
  Context management configuration from claude_context_edit(). Automatically clears stale tool results to prevent context exhaustion. Default NULL (no context editing).
- token_efficient
  Logical. Enable token-efficient tool responses. Reduces output tokens by up to 70%. Default FALSE.
- skills
  List. Agent Skills to load. Create with claude_skill(), claude_skill_local(), or reference pre-built skills ("powerpoint", "excel", "word", "pdf"). Requires the code execution tool. Default NULL.
Value
An ellmer Chat object with methods:
- $chat(...): Send message(s), receive a text response
- $chat_structured(prompt, type): Send a message, receive structured data
- $stream(...): Stream response chunks
- $set_params(params): Update parameters mid-conversation
- $register_tool(tool): Add a tool after creation
See also
Other claude-api:
claude_computer()
Examples
if (FALSE) { # \dontrun{
# -----------------------------------------------------------------
# BASIC TEXT
# -----------------------------------------------------------------
chat <- claude_new()
chat$chat("Explain Impressionism in 2 sentences.")
# With system prompt
chat <- claude_new(sys_prompt = "You are an art historian. Be concise.")
chat$chat("What is Baroque?")
# With temperature (0 = deterministic, 1 = creative)
chat <- claude_new(temp = 0.9)
chat$chat("Write a haiku about art.")
# -----------------------------------------------------------------
# MULTI-TURN CONVERSATION
# -----------------------------------------------------------------
chat <- claude_new(sys_prompt = "You are an art expert.")
chat$chat("What is Baroque art?")
chat$chat("How does it differ from Renaissance?")
chat$chat("Name 3 Baroque painters.")
# -----------------------------------------------------------------
# IMAGE INPUT
# -----------------------------------------------------------------
# From local file
chat <- claude_new()
chat$chat(ellmer::content_image_file("artwork.png"), "Describe this artwork.")
# From URL
chat$chat(ellmer::content_image_url("https://example.com/painting.jpg"), "What style is this?")
# Multiple images
chat$chat(
  ellmer::content_image_file("painting1.png"),
  ellmer::content_image_file("painting2.png"),
  "Compare these two artworks."
)
# -----------------------------------------------------------------
# PDF INPUT
# -----------------------------------------------------------------
chat <- claude_new()
chat$chat(ellmer::content_pdf_file("catalog.pdf"), "Summarize this art catalog.")
# -----------------------------------------------------------------
# STRUCTURED OUTPUT
# -----------------------------------------------------------------
# Define schema
art_schema <- ellmer::type_object(
  style = ellmer::type_string("Art movement or style"),
  period = ellmer::type_string("Time period"),
  notable_artists = ellmer::type_array(ellmer::type_string(), "Key artists")
)
chat <- claude_new(temp = 0)
result <- chat$chat_structured("Describe Impressionism", type = art_schema)
# Returns: list(style = "Impressionism", period = "1860s-1880s", notable_artists = c(...))
# Structured in multi-turn
chat <- claude_new()
chat$chat("Tell me about Van Gogh")
chat$chat_structured(
  "List his 3 most famous paintings",
  type = ellmer::type_array(ellmer::type_string())
)
# -----------------------------------------------------------------
# STREAMING
# -----------------------------------------------------------------
chat <- claude_new()
stream <- chat$stream("Tell me a story about an artist.")
coro::loop(for (chunk in stream) cat(chunk))
# -----------------------------------------------------------------
# MODEL SELECTION
# -----------------------------------------------------------------
# Use Sonnet for faster/cheaper responses
chat <- claude_new(ml = "claude-sonnet-4-5-20250929")
chat$chat("Quick question")
# Use Opus for complex reasoning (default)
chat <- claude_new(ml = "claude-opus-4-5-20251101")
chat$chat("Complex analysis...")
# -----------------------------------------------------------------
# BETA FEATURES
# -----------------------------------------------------------------
# 1M token context window (Sonnet only)
chat <- claude_new(
  ml = "claude-sonnet-4-5-20250929",
  beta = beta_headers$BETA_1M_CONTEXT
)
chat$chat(large_document)
# Multiple beta features
chat <- claude_new(beta = c(
  beta_headers$BETA_CODE_EXEC_BASH,
  beta_headers$BETA_FILES_API
))
# -----------------------------------------------------------------
# PROMPT CACHING
# -----------------------------------------------------------------
# Cache for 5 minutes (default)
chat <- claude_new(cache = "5m")
# Cache for 1 hour
chat <- claude_new(cache = "1h")
# No caching
chat <- claude_new(cache = "none")
# -----------------------------------------------------------------
# PARAMETER UPDATES MID-CONVERSATION
# -----------------------------------------------------------------
chat <- claude_new(temp = 0)
chat$chat("Factual question")
chat$set_params(ellmer::params(temperature = 0.9))
chat$chat("Creative question")
# -----------------------------------------------------------------
# EXTENDED THINKING
# -----------------------------------------------------------------
# Enable with token budget (min 1024)
chat <- claude_new(think_tokens = 16000)
chat$chat("Prove there are infinitely many prime numbers.")
# Or use effort levels
chat <- claude_new(think_effort = "high")
chat$chat("Design a caching architecture for a high-traffic API.")
# Multi-turn with extended thinking
chat <- claude_new(think_tokens = 10000)
chat$chat("Let's solve this step by step: What is 17^4?")
chat$chat("Now factor that result into primes.")
# Interleaved thinking (for tool use scenarios)
chat <- claude_new(think_tokens = 20000, interleaved = TRUE)
chat$chat("Analyze this problem using multiple approaches.")
# Token budget guidelines:
# - 1024-4000: Quick reasoning tasks
# - 4000-16000: Moderate complexity
# - 16000-32000: Complex multi-step problems
# - 32000+: Very complex problems
# -----------------------------------------------------------------
# EFFORT CONTROL (Opus 4.5 Only)
# -----------------------------------------------------------------
# Reduce token consumption while maintaining quality
chat <- claude_new(effort = "medium")
chat$chat("Explain quantum computing")
# At medium effort, uses 76% fewer tokens than default
# Low effort for simple tasks
chat <- claude_new(effort = "low")
chat$chat("What is 2+2?")
# High effort (equivalent to default behavior)
chat <- claude_new(effort = "high")
chat$chat("Analyze this complex problem...")
# Effort requires Opus 4.5
chat <- claude_new(
  ml = "claude-opus-4-5-20251101",
  effort = "medium"
)
# -----------------------------------------------------------------
# TOOL USE
# -----------------------------------------------------------------
# Custom tool
weather_tool <- claude_tool(
  fn = function(city) paste0("Weather in ", city, ": Sunny, 72F"),
  name = "get_weather",
  desc = "Get current weather for a city",
  city = ellmer::type_string("City name")
)
chat <- claude_new(tools = list(weather_tool))
chat$chat("What's the weather in Paris?")
# Built-in web search
chat <- claude_new(tools = list(claude_web_search()))
chat$chat("What are the latest AI developments?")
# Code execution (requires beta header)
chat <- claude_new(
  tools = list(claude_code_exec()),
  beta = beta_headers$BETA_CODE_EXEC_BASH
)
chat$chat("Calculate the first 20 Fibonacci numbers")
# Multiple tools
chat <- claude_new(tools = list(
  claude_web_search(),
  weather_tool
))
chat$chat("Search for weather forecasting APIs")
# Register tool after creation
chat <- claude_new()
chat$register_tool(weather_tool)
chat$chat("What's the weather in Tokyo?")
} # }
