
Creating CLI Experiences with Distri

Distri can power command-line interfaces (CLIs) that provide interactive agent experiences. This guide shows how to build CLI applications that use a Distri server as the backend.

Overview

A Distri-powered CLI typically:

  1. Connects to a Distri server (local or remote)
  2. Sends user input to agents
  3. Streams agent responses in real-time
  4. Handles tool calls and displays results (all four steps are sketched in code right after this list)
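
As a compact sketch, here is how those four steps map onto the client API used throughout this guide (the fuller examples below add the input loop and error handling):

import { Distri } from '@distri/core';

// 1. Connect to a Distri server (local or remote).
const client = new Distri({ baseUrl: 'http://localhost:8787/api/v1' });

async function ask(agentId: string, threadId: string, input: string) {
  // 2. Send user input to the agent.
  const stream = await client.streamMessage({
    agentId,
    threadId,
    message: { role: 'user', parts: [{ part_type: 'text', data: input }] },
  });

  // 3. Stream the response in real time, and 4. surface tool calls as they arrive.
  for await (const chunk of stream) {
    if (chunk.text) process.stdout.write(chunk.text);
    if (chunk.tool_calls) {
      for (const toolCall of chunk.tool_calls) console.log(`\n[Tool: ${toolCall.name}]`);
    }
  }
}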

Architecture

┌─────────────────┐
│     CLI App     │
│ (Rust / Go /    │
│    Node.js)     │
└────────┬────────┘
         │ HTTP/WebSocket
┌────────▼────────┐
│  Distri Server  │
│ (distri serve)  │
└─────────────────┘
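
Before wiring up an interactive loop, it helps to verify that the CLI can actually reach the server. A minimal connectivity check, reusing the listAgents() call that later examples depend on (the DISTRI_API_URL convention matches the examples below):

import { Distri } from '@distri/core';

// Read the server URL from the environment, falling back to the local default.
const client = new Distri({
  baseUrl: process.env.DISTRI_API_URL || 'http://localhost:8787/api/v1',
});

async function checkConnection() {
  try {
    // listAgents() doubles as a health check: if the server is unreachable,
    // this fails fast with a clear error instead of hanging later.
    const agents = await client.listAgents();
    console.log(`Connected. ${agents.length} agent(s) available.`);
  } catch (error) {
    console.error('Could not reach the Distri server:', error);
    process.exit(1);
  }
}

checkConnection();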

Basic CLI Implementation

Using the Distri client (@distri/core, Node.js/TypeScript)

import { Distri } from '@distri/core';
import readline from 'readline';

const client = new Distri({
  baseUrl: process.env.DISTRI_API_URL || 'http://localhost:8787/api/v1',
});

const rl = readline.createInterface({
  input: process.stdin,
  output: process.stdout,
});

async function chatLoop(agentId: string) {
  const threadId = crypto.randomUUID();

  console.log(`Connected to agent: ${agentId}`);
  console.log('Type your messages (or "exit" to quit):\n');

  rl.on('line', async (input) => {
    if (input.trim() === 'exit') {
      rl.close();
      process.exit(0);
    }

    try {
      // Stream the response
      const stream = await client.streamMessage({
        agentId,
        threadId,
        message: {
          role: 'user',
          parts: [{ part_type: 'text', data: input }],
        },
      });

      process.stdout.write('\nAgent: ');
      for await (const chunk of stream) {
        if (chunk.text) {
          process.stdout.write(chunk.text);
        }
        if (chunk.tool_calls) {
          for (const toolCall of chunk.tool_calls) {
            console.log(`\n[Tool: ${toolCall.name}]`);
          }
        }
      }
      process.stdout.write('\n\n> ');
    } catch (error) {
      console.error('Error:', error);
      process.stdout.write('> ');
    }
  });

  process.stdout.write('> ');
}

// Run the CLI
const agentId = process.argv[2] || 'default_agent';
chatLoop(agentId);

Rust CLI Implementation

For a Rust CLI (like the browsr example), you can use the distri-client crate:

use distri_client::DistriClient;
use futures::StreamExt; // needed for `stream.next()`
use std::io::{self, Write};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let client = DistriClient::new("http://localhost:8787/api/v1".to_string());
    let agent_id = std::env::args()
        .nth(1)
        .unwrap_or_else(|| "default_agent".to_string());
    let thread_id = uuid::Uuid::new_v4().to_string();

    println!("Connected to agent: {}", agent_id);
    println!("Type your messages (or 'exit' to quit):\n");

    loop {
        print!("> ");
        io::stdout().flush()?;

        let mut input = String::new();
        io::stdin().read_line(&mut input)?;
        let input = input.trim();

        if input == "exit" {
            break;
        }

        // Send message and stream response
        let mut stream = client
            .stream_message(&agent_id, &thread_id, input)
            .await?;

        print!("\nAgent: ");
        while let Some(chunk) = stream.next().await {
            match chunk {
                Ok(response) => {
                    if let Some(text) = response.text {
                        print!("{}", text);
                        io::stdout().flush()?;
                    }
                    if let Some(tool_calls) = response.tool_calls {
                        for tool_call in tool_calls {
                            println!("\n[Tool: {}]", tool_call.name);
                        }
                    }
                }
                Err(e) => {
                    eprintln!("\nError: {}", e);
                    break;
                }
            }
        }
        println!("\n");
    }

    Ok(())
}

Interactive CLI with Prompts

Use a library like inquirer or prompts for better UX:

import { Distri } from '@distri/core';
import prompts from 'prompts';

const client = new Distri({
  baseUrl: 'http://localhost:8787/api/v1',
});

async function interactiveCLI() {
  // List available agents
  const agents = await client.listAgents();

  const { agentId } = await prompts({
    type: 'select',
    name: 'agentId',
    message: 'Select an agent:',
    choices: agents.map(agent => ({
      title: agent.name,
      value: agent.id,
      description: agent.description,
    })),
  });

  const threadId = crypto.randomUUID();
  console.log(`\nStarting conversation with ${agentId}...\n`);

  while (true) {
    const { message } = await prompts({
      type: 'text',
      name: 'message',
      message: 'You:',
    });

    if (!message || message.trim() === 'exit') {
      break;
    }

    console.log('\nAgent:');
    const stream = await client.streamMessage({
      agentId,
      threadId,
      message: {
        role: 'user',
        parts: [{ part_type: 'text', data: message }],
      },
    });

    for await (const chunk of stream) {
      if (chunk.text) {
        process.stdout.write(chunk.text);
      }
    }
    console.log('\n');
  }
}

interactiveCLI();

Handling Tool Calls

Display tool execution in the CLI:

async function handleToolCalls(
  client: Distri,
  agentId: string,
  threadId: string,
  toolCalls: any[]
) {
  for (const toolCall of toolCalls) {
    console.log(`\n[Executing tool: ${toolCall.name}]`);
    console.log(`Input: ${JSON.stringify(toolCall.input, null, 2)}`);

    // If you have local tool handlers (see the sketch below for one way to define `localTools`)
    if (localTools.has(toolCall.name)) {
      const tool = localTools.get(toolCall.name)!;
      const result = await tool.handler(toolCall.input);
      console.log(`Result: ${result}`);
    } else {
      // Tool is handled by the server
      console.log('(Handled by server)');
    }
  }
}
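
The helper above refers to a localTools registry that is not defined in the snippet. One possible shape for it, plus the wiring that forwards tool calls from a response stream to the helper (the registry name and handler signature are illustrative assumptions, not part of the Distri API):

import { promises as fs } from 'fs';
import { Distri } from '@distri/core';

// Hypothetical registry of client-side tools (name -> handler); the shape of
// `localTools` is an assumption used only for this example.
const localTools = new Map<string, { handler: (input: any) => Promise<string> }>();

localTools.set('read_file', {
  // Illustrative handler: read a local file and return its contents.
  handler: async (input: { path: string }) => fs.readFile(input.path, 'utf8'),
});

// Drain a response stream, printing text and delegating tool calls to handleToolCalls.
async function printStream(
  client: Distri,
  agentId: string,
  threadId: string,
  stream: AsyncIterable<any>
) {
  for await (const chunk of stream) {
    if (chunk.text) {
      process.stdout.write(chunk.text);
    }
    if (chunk.tool_calls) {
      await handleToolCalls(client, agentId, threadId, chunk.tool_calls);
    }
  }
}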

CLI with Rich Output

Use libraries like chalk and ora for better formatting:

import { Distri } from '@distri/core';
import prompts from 'prompts';
import chalk from 'chalk';
import ora from 'ora';

async function richCLI() {
  const spinner = ora('Connecting to Distri server...').start();

  try {
    const client = new Distri({ baseUrl: 'http://localhost:8787/api/v1' });
    const agents = await client.listAgents();
    spinner.succeed('Connected');

    const { agentId } = await prompts({
      type: 'select',
      name: 'agentId',
      message: 'Select an agent:',
      choices: agents.map(a => ({
        title: chalk.cyan(a.name),
        value: a.id,
        description: a.description,
      })),
    });

    console.log(chalk.green(`\n✓ Using agent: ${agentId}\n`));

    // Keep one thread for the whole session so the agent retains context.
    const threadId = crypto.randomUUID();

    while (true) {
      const { message } = await prompts({
        type: 'text',
        name: 'message',
        message: chalk.blue('You:'),
      });

      if (!message) break;

      const thinkingSpinner = ora('Agent thinking...').start();
      const stream = await client.streamMessage({
        agentId,
        threadId,
        message: {
          role: 'user',
          parts: [{ part_type: 'text', data: message }],
        },
      });

      thinkingSpinner.stop();
      process.stdout.write(chalk.green('Agent: '));

      for await (const chunk of stream) {
        if (chunk.text) {
          process.stdout.write(chunk.text);
        }
        if (chunk.tool_calls) {
          for (const toolCall of chunk.tool_calls) {
            console.log(chalk.yellow(`\n[Tool: ${toolCall.name}]`));
          }
        }
      }
      console.log('\n');
    }
  } catch (error) {
    spinner.fail('Connection failed');
    console.error(chalk.red('Error:'), error);
  }
}

richCLI();

Command-Line Arguments

Parse CLI arguments for configuration:

import { program } from 'commander';
import { Distri } from '@distri/core';

program
  .name('distri-cli')
  .description('CLI for interacting with Distri agents')
  .version('1.0.0')
  .option('-s, --server <url>', 'Distri server URL', 'http://localhost:8787/api/v1')
  .option('-a, --agent <name>', 'Agent name to use')
  .option('-t, --thread <id>', 'Thread ID (creates a new one if not provided)')
  .argument('[message]', 'Message to send (interactive mode if omitted)')
  .action(async (message, options) => {
    const client = new Distri({ baseUrl: options.server });

    if (message) {
      // Single message mode
      const threadId = options.thread || crypto.randomUUID();
      const response = await client.sendMessage({
        agentId: options.agent || 'default',
        threadId,
        message: {
          role: 'user',
          parts: [{ part_type: 'text', data: message }],
        },
      });
      console.log(response.text);
    } else {
      // Interactive mode (a chat loop like the one shown earlier)
      await interactiveCLI(client, options.agent);
    }
  });

program.parse();

Example: Browsr CLI Pattern

Based on the browsr project structure:

// main.rs
use clap::{Parser, Subcommand};
use distri_client::DistriClient;

#[derive(Parser)]
#[command(name = "browsr")]
#[command(about = "Browser automation CLI powered by Distri")]
struct Cli {
    #[command(subcommand)]
    command: Commands,
}

#[derive(Subcommand)]
enum Commands {
    /// Run a browser sequence
    Run {
        #[arg(short, long)]
        agent: String,
        #[arg(short, long)]
        task: String,
    },
    /// List available agents
    ListAgents,
    /// Interactive chat mode
    Chat {
        #[arg(short, long)]
        agent: Option<String>,
    },
}

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let cli = Cli::parse();
    let client = DistriClient::new("http://localhost:8787/api/v1".to_string());

    match cli.command {
        Commands::Run { agent, task } => {
            let thread_id = uuid::Uuid::new_v4().to_string();
            let response = client
                .send_message(&agent, &thread_id, &task)
                .await?;
            println!("{}", response.text.unwrap_or_default());
        }
        Commands::ListAgents => {
            let agents = client.list_agents().await?;
            for agent in agents {
                println!("{} - {}", agent.name, agent.description);
            }
        }
        Commands::Chat { agent } => {
            // interactive_chat is a prompt/stream loop like the one in the
            // "Rust CLI Implementation" section above.
            interactive_chat(client, agent.unwrap_or_else(|| "default".to_string())).await?;
        }
    }

    Ok(())
}

Best Practices

  1. Error Handling: Always handle connection errors and timeouts (see the sketch after this list)
  2. Streaming: Use streaming for better UX with long responses
  3. Configuration: Support environment variables and config files
  4. Logging: Add verbose/debug modes for troubleshooting
  5. Thread Management: Allow users to resume conversations
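
A sketch that combines several of these practices: the server URL comes from the environment, calls fail fast behind a timeout, and an optional thread ID lets a conversation be resumed. The withTimeout wrapper is illustrative; only the client calls shown earlier in this guide are assumed:

import { Distri } from '@distri/core';

// Configuration: read the server URL from the environment, with a sane default.
const client = new Distri({
  baseUrl: process.env.DISTRI_API_URL || 'http://localhost:8787/api/v1',
});

// Error handling: wrap a call with a timeout so an unreachable server fails fast.
async function withTimeout<T>(promise: Promise<T>, ms: number): Promise<T> {
  let timer: NodeJS.Timeout | undefined;
  const timeout = new Promise<never>((_, reject) => {
    timer = setTimeout(() => reject(new Error(`Timed out after ${ms}ms`)), ms);
  });
  try {
    return await Promise.race([promise, timeout]);
  } finally {
    clearTimeout(timer);
  }
}

// Thread management: reuse a caller-supplied thread ID to resume a conversation,
// or create a fresh one for a new conversation.
async function send(agentId: string, text: string, threadId?: string) {
  const id = threadId || crypto.randomUUID();
  const response = await withTimeout(
    client.sendMessage({
      agentId,
      threadId: id,
      message: { role: 'user', parts: [{ part_type: 'text', data: text }] },
    }),
    30_000
  );
  console.log(response.text);
  return id; // Persist this (e.g. in a config file) to resume the thread later.
}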
