Basic Usage Examples
Common patterns for using the X-Router API.
Simple Chat Completion
The most basic usage - send a message and get a response:
import { wrapFetchWithPayment } from 'x402-fetch';
import { privateKeyToAccount } from 'viem/accounts';
// Fail fast if the signing key is missing: the `as \`0x${string}\`` cast alone
// would silently pass `undefined` through to privateKeyToAccount and fail later
// with a much less helpful error.
const privateKey = process.env.PRIVATE_KEY;
if (!privateKey) {
  throw new Error('PRIVATE_KEY environment variable is not set');
}
const account = privateKeyToAccount(privateKey as `0x${string}`);
// fetch wrapper that transparently settles x402 payment challenges with `account`.
const fetchWithPayment = wrapFetchWithPayment(fetch, account);
/**
 * Minimal example: send one user message and print the reply.
 * Payment is handled transparently by the wrapped fetch.
 */
async function simpleChatCompletion() {
  const payload = {
    messages: [{ role: 'user', content: 'What is 2+2?' }],
    model: 'anthropic/claude-3.5-sonnet',
    max_tokens: 100
  };
  const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(payload)
  });
  const data = await response.json();
  console.log(data.content);
}
simpleChatCompletion();

With System Prompt
Use a system prompt to guide the AI’s behavior:
/**
 * Steer the model's behavior with a system message placed before the
 * user turn, and report token usage from the response metadata.
 */
async function chatWithSystemPrompt() {
  const systemMessage = {
    role: 'system',
    content: 'You are a helpful coding assistant. Provide concise, practical answers.'
  };
  const userMessage = {
    role: 'user',
    content: 'How do I reverse a string in Python?'
  };
  const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      messages: [systemMessage, userMessage],
      model: 'anthropic/claude-3.5-sonnet',
      max_tokens: 200
    })
  });
  const data = await response.json();
  console.log(data.content);
  console.log(`Tokens used: ${data.usage.total_tokens}`);
}

Multi-Turn Conversation
Maintain conversation context across multiple turns:
/**
 * Maintain conversation context across multiple turns by re-sending the
 * full message history each time and appending every reply to it.
 * The original triplicated request/append code is factored into one
 * local helper so the per-turn logic lives in a single place.
 */
async function multiTurnConversation() {
  const messages: { role: string; content: string }[] = [];

  // One round-trip: append the user turn, send the full history,
  // print the reply, and keep it in the context for the next turn.
  async function sendTurn(userContent: string, maxTokens: number): Promise<void> {
    messages.push({ role: 'user', content: userContent });
    const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages,
        model: 'anthropic/claude-3.5-sonnet',
        max_tokens: maxTokens
      })
    });
    const data = await response.json();
    console.log('AI:', data.content);
    messages.push({ role: 'assistant', content: data.content });
  }

  await sendTurn('Hello!', 100);                                  // Turn 1
  await sendTurn('Can you help me with Python?', 200);            // Turn 2
  await sendTurn('Show me a list comprehension example', 150);    // Turn 3
}

Cost-Aware Requests
Check cost before making requests:
/**
 * Estimate the cost of a request first and only pay for it when the
 * estimate is at or below `maxCost` (USD).
 *
 * @param userMessage - The prompt to send.
 * @param maxCost - Maximum acceptable estimated cost in USD (default $0.01).
 * @returns The reply text, or null when the request was skipped.
 * @throws Error when the estimate endpoint responds with a non-2xx status.
 */
async function costAwareRequest(userMessage: string, maxCost: number = 0.01) {
  const requestBody = {
    messages: [{ role: 'user', content: userMessage }],
    model: 'anthropic/claude-3.5-sonnet',
    max_tokens: 500
  };
  // Get cost estimate. Plain fetch here — presumably the estimate
  // endpoint itself is free of charge (TODO confirm against API docs).
  const estimateResponse = await fetch('https://api.x-router.ai/v1/estimate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(requestBody)
  });
  if (!estimateResponse.ok) {
    throw new Error(`Cost estimate failed with status ${estimateResponse.status}`);
  }
  const estimate = await estimateResponse.json();
  const cost = parseFloat(estimate.estimatedCost.usd);
  console.log(`Estimated cost: $${cost}`);
  // Treat an unparseable estimate (NaN) as over budget: NaN > maxCost is
  // false, so without this guard a malformed estimate would be paid for.
  if (!Number.isFinite(cost) || cost > maxCost) {
    console.log(`Cost exceeds maximum ($${maxCost}), skipping request`);
    return null;
  }
  // Proceed with actual (paid) request.
  const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(requestBody)
  });
  const data = await response.json();
  return data.content;
}
// Usage
const response = await costAwareRequest('Explain quantum computing', 0.02);

Using Different Models
Switch between models easily:
/**
 * Send the same prompt to several models in turn and print each reply
 * along with its token usage, to compare output and cost side by side.
 */
async function compareModels(prompt: string) {
  const models = ['anthropic/claude-3.5-sonnet', 'openai/gpt-4o', 'google/gemini-pro'];
  for (const model of models) {
    console.log(`\nTrying ${model}:`);
    const body = JSON.stringify({
      messages: [{ role: 'user', content: prompt }],
      model,
      max_tokens: 100
    });
    const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body
    });
    const data = await response.json();
    console.log(data.content);
    console.log(`Tokens: ${data.usage.total_tokens}`);
  }
}
compareModels('What is the capital of France?');

Temperature Control
Adjust randomness in responses:
/**
 * Show how the temperature setting affects output variability by
 * issuing the same prompt at three different temperatures.
 */
async function temperatureComparison() {
  const prompt = 'Write a creative opening line for a sci-fi novel';
  for (const temperature of [0.0, 0.7, 1.5]) {
    console.log(`\nTemperature ${temperature}:`);
    const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages: [{ role: 'user', content: prompt }],
        model: 'anthropic/claude-3.5-sonnet',
        max_tokens: 50,
        temperature
      })
    });
    const data = await response.json();
    console.log(data.content);
  }
}

Error Handling
Handle errors gracefully:
/**
 * Send a chat completion and translate common failures into readable
 * console messages. Rate-limited (429) requests are retried after a
 * 60s pause, at most `attempt < 3` times — the original retried
 * unconditionally, which could recurse forever on a persistent 429.
 *
 * @param messages - Chat history to send.
 * @param attempt - Internal retry counter; callers can omit it.
 * @returns The parsed response body.
 * @throws The original error after logging it (except on a retried 429).
 */
async function robustRequest(messages: any[], attempt: number = 0) {
  try {
    const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify({
        messages,
        model: 'anthropic/claude-3.5-sonnet',
        max_tokens: 200
      })
    });
    if (!response.ok) {
      const error = await response.json();
      throw new Error(`API Error (${response.status}): ${error.message}`);
    }
    const data = await response.json();
    return data;
  } catch (error) {
    // catch variables are `unknown` under strict mode — narrow before
    // reading .message, and tolerate non-Error throws.
    const message = error instanceof Error ? error.message : String(error);
    if (message.includes('402')) {
      console.error('Payment failed. Check USDC balance.');
    } else if (message.includes('429')) {
      if (attempt < 3) {
        console.error('Rate limited. Waiting before retry...');
        await new Promise(resolve => setTimeout(resolve, 60000));
        return robustRequest(messages, attempt + 1); // Bounded retry
      }
      console.error('Rate limited and retries exhausted.');
    } else if (message.includes('USDC')) {
      console.error('Insufficient USDC balance');
    } else {
      console.error('Unexpected error:', message);
    }
    throw error;
  }
}

Batch Processing
Process multiple requests efficiently:
/**
 * Run a list of prompts sequentially, collecting each reply and its
 * token count. Failures are recorded per-prompt instead of aborting
 * the whole batch, and a 1s pause between requests throttles the rate.
 *
 * @param prompts - Prompts to process, one request each.
 * @returns One entry per prompt: {prompt, response, tokens} or {prompt, error}.
 */
async function batchProcess(prompts: string[]) {
  const results = [];
  for (const prompt of prompts) {
    try {
      const response = await fetchWithPayment('https://api.x-router.ai/v1/chat/completions', {
        method: 'POST',
        headers: { 'Content-Type': 'application/json' },
        body: JSON.stringify({
          messages: [{ role: 'user', content: prompt }],
          model: 'anthropic/claude-3.5-sonnet',
          max_tokens: 100
        })
      });
      const data = await response.json();
      results.push({
        prompt,
        response: data.content,
        tokens: data.usage.total_tokens
      });
      // Rate limit: wait between requests
      await new Promise(resolve => setTimeout(resolve, 1000));
    } catch (error) {
      console.error(`Failed for prompt "${prompt}":`, error);
      // Narrow the unknown catch variable instead of assuming .message
      // exists — the original would itself throw on non-Error values.
      results.push({
        prompt,
        error: error instanceof Error ? error.message : String(error)
      });
    }
  }
  return results;
}
// Usage: run three prompts through the batch helper.
const prompts = ['What is AI?', 'Explain machine learning', 'What is deep learning?'];
const results = await batchProcess(prompts);
console.log(results);

Last updated on