Skip to content

Commit c900eaf

Browse files
committed
feat(examples): add Rust examples 11-19 and fix CI/CD issues
Add comprehensive examples demonstrating advanced LLMKit features:
- 11_embeddings: Text embedding generation with multiple providers
- 12_audio_synthesis: Text-to-speech with OpenAI TTS
- 13_audio_transcription: Speech-to-text with Whisper
- 14_image_generation: Image generation with DALL-E and Stable Diffusion
- 15_specialized_api: Ranking, moderation, and classification
- 16_video_generation: Video generation with RunwayML
- 17_response_caching: Cache configuration demonstration
- 18_retry_resilience: Retry configuration and error handling
- 19_openai_compatible: Generic OpenAI-compatible provider setup

Also fixes:
- Add feature gates for Anthropic-specific integration tests
- Export missing Python binding types (audio, video, image, specialized)
- Fix Node.js unit test expectations for model info and capabilities
- Add try/catch for Vertex/Bedrock tests when features unavailable
1 parent cc01df9 commit c900eaf

14 files changed

Lines changed: 1476 additions & 40 deletions

Cargo.toml

Lines changed: 35 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -564,5 +564,38 @@ required-features = ["openrouter"]
564564
name = "10_batch_processing"
565565
required-features = ["openrouter"]
566566

567-
# Examples 11-19 are available in Python and TypeScript.
568-
# Rust implementations will be added as the APIs become available.
567+
[[example]]
568+
name = "11_embeddings"
569+
required-features = ["openai"]
570+
571+
[[example]]
572+
name = "12_audio_synthesis"
573+
required-features = ["openai"]
574+
575+
[[example]]
576+
name = "13_audio_transcription"
577+
required-features = ["openai"]
578+
579+
[[example]]
580+
name = "14_image_generation"
581+
required-features = ["openai"]
582+
583+
[[example]]
584+
name = "15_specialized_api"
585+
required-features = ["cohere", "openai"]
586+
587+
[[example]]
588+
name = "16_video_generation"
589+
required-features = ["runwayml"]
590+
591+
[[example]]
592+
name = "17_response_caching"
593+
required-features = ["openrouter"]
594+
595+
[[example]]
596+
name = "18_retry_resilience"
597+
required-features = ["openrouter"]
598+
599+
[[example]]
600+
name = "19_openai_compatible"
601+
required-features = ["openai-compatible"]

examples/11_embeddings.rs

Lines changed: 141 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,141 @@
1+
//! Embeddings Example
2+
//!
3+
//! Demonstrates text embedding generation and similarity computation.
4+
//!
5+
//! Requirements:
6+
//! - Set OPENAI_API_KEY environment variable (for text-embedding-3-small)
7+
//!
8+
//! Run:
9+
//! cargo run --example 11_embeddings
10+
11+
use llmkit::{EmbeddingRequest, LLMKitClient};
12+
13+
#[tokio::main]
14+
async fn main() -> llmkit::Result<()> {
15+
let client = LLMKitClient::builder()
16+
.with_openai_from_env()
17+
.build()
18+
.await?;
19+
20+
// Example 1: Basic embedding
21+
println!("=== Example 1: Basic Embedding ===");
22+
let request = EmbeddingRequest::new("openai/text-embedding-3-small", "Hello, world!");
23+
let response = client.embed(request).await?;
24+
25+
println!("Model: {}", response.model);
26+
println!("Dimensions: {}", response.dimensions());
27+
if let Some(values) = response.values() {
28+
println!("First 5 values: {:?}", &values[..5.min(values.len())]);
29+
}
30+
println!("Tokens used: {}", response.usage.total_tokens);
31+
32+
// Example 2: Batch embeddings
33+
println!("\n=== Example 2: Batch Embeddings ===");
34+
let texts = vec![
35+
"The quick brown fox",
36+
"A lazy dog sleeps",
37+
"Python is a programming language",
38+
"Machine learning is fascinating",
39+
];
40+
41+
let request = EmbeddingRequest::batch("openai/text-embedding-3-small", texts.clone());
42+
let response = client.embed(request).await?;
43+
44+
println!("Got {} embeddings", response.embeddings.len());
45+
println!("Dimensions: {}", response.dimensions());
46+
for emb in &response.embeddings {
47+
println!(" Text {}: {} dimensions", emb.index, emb.values.len());
48+
}
49+
50+
// Example 3: Compute similarity
51+
println!("\n=== Example 3: Similarity Computation ===");
52+
let similarity_texts = vec![
53+
"I love programming in Python",
54+
"Python coding is my favorite",
55+
"The weather is nice today",
56+
"I enjoy writing code",
57+
];
58+
59+
let request =
60+
EmbeddingRequest::batch("openai/text-embedding-3-small", similarity_texts.clone());
61+
let response = client.embed(request).await?;
62+
63+
let reference = &response.embeddings[0];
64+
println!("Reference: '{}'", similarity_texts[0]);
65+
println!("\nSimilarities:");
66+
67+
for (i, emb) in response.embeddings.iter().skip(1).enumerate() {
68+
let similarity = reference.cosine_similarity(emb);
69+
println!(" vs '{}': {:.4}", similarity_texts[i + 1], similarity);
70+
}
71+
72+
// Example 4: Semantic search
73+
println!("\n=== Example 4: Semantic Search ===");
74+
let documents = vec![
75+
"Python is a high-level programming language",
76+
"Machine learning uses algorithms to learn from data",
77+
"The Eiffel Tower is located in Paris, France",
78+
"Deep learning is a subset of machine learning",
79+
"JavaScript is commonly used for web development",
80+
"Natural language processing deals with text and speech",
81+
];
82+
83+
let query = "What is artificial intelligence?";
84+
println!("Query: '{}'", query);
85+
86+
// Embed query
87+
let query_response = client
88+
.embed(EmbeddingRequest::new(
89+
"openai/text-embedding-3-small",
90+
query,
91+
))
92+
.await?;
93+
let query_embedding = &query_response.embeddings[0];
94+
95+
// Embed documents
96+
let doc_response = client
97+
.embed(EmbeddingRequest::batch(
98+
"openai/text-embedding-3-small",
99+
documents.clone(),
100+
))
101+
.await?;
102+
103+
// Compute similarities and rank
104+
let mut results: Vec<(f32, &str)> = doc_response
105+
.embeddings
106+
.iter()
107+
.enumerate()
108+
.map(|(i, emb)| (query_embedding.cosine_similarity(emb), documents[i]))
109+
.collect();
110+
111+
results.sort_by(|a, b| b.0.partial_cmp(&a.0).unwrap());
112+
113+
println!("\nResults (by relevance):");
114+
for (similarity, doc) in results {
115+
println!(" [{:.4}] {}", similarity, doc);
116+
}
117+
118+
// Example 5: Distance metrics
119+
println!("\n=== Example 5: Distance Metrics ===");
120+
let metric_texts = vec!["Hello world", "Hello there"];
121+
122+
let response = client
123+
.embed(EmbeddingRequest::batch(
124+
"openai/text-embedding-3-small",
125+
metric_texts.clone(),
126+
))
127+
.await?;
128+
129+
let emb1 = &response.embeddings[0];
130+
let emb2 = &response.embeddings[1];
131+
132+
println!("Text 1: '{}'", metric_texts[0]);
133+
println!("Text 2: '{}'", metric_texts[1]);
134+
println!();
135+
println!("Distance metrics:");
136+
println!(" Cosine similarity: {:.6}", emb1.cosine_similarity(emb2));
137+
println!(" Dot product: {:.6}", emb1.dot_product(emb2));
138+
println!(" Euclidean distance: {:.6}", emb1.euclidean_distance(emb2));
139+
140+
Ok(())
141+
}

examples/12_audio_synthesis.rs

Lines changed: 125 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,125 @@
1+
//! Audio Synthesis (Text-to-Speech) Example
2+
//!
3+
//! Demonstrates speech generation from text using various providers.
4+
//!
5+
//! Requirements:
6+
//! - Set OPENAI_API_KEY environment variable (for OpenAI TTS)
7+
//! Or ELEVENLABS_API_KEY for ElevenLabs
8+
//!
9+
//! Run:
10+
//! cargo run --example 12_audio_synthesis
11+
12+
use llmkit::{AudioFormat, LLMKitClient, SpeechRequest};
13+
use std::path::Path;
14+
15+
#[tokio::main]
16+
async fn main() -> Result<(), Box<dyn std::error::Error>> {
17+
let client = LLMKitClient::builder()
18+
.with_openai_from_env()
19+
.build()
20+
.await?;
21+
22+
println!("=== Audio Synthesis Example ===\n");
23+
24+
// Example 1: Basic speech synthesis
25+
println!("--- Example 1: Basic Speech Synthesis ---");
26+
let text = "Hello! This is a demonstration of text-to-speech synthesis using LLMKit.";
27+
println!("Input text: {}", text);
28+
29+
let request = SpeechRequest::new("openai/tts-1", text, "alloy");
30+
let response = client.speech(request).await?;
31+
32+
let output_path = "output_basic.mp3";
33+
response.save(output_path)?;
34+
println!("Audio saved to: {}", output_path);
35+
println!("Audio size: {} bytes", response.audio.len());
36+
println!("Format: {:?}", response.format);
37+
38+
// Example 2: High-quality speech with different voice
39+
println!("\n--- Example 2: High-Quality Speech ---");
40+
let text = "This is high-quality audio synthesis with the 'nova' voice.";
41+
42+
let request = SpeechRequest::new("openai/tts-1-hd", text, "nova");
43+
let response = client.speech(request).await?;
44+
45+
let output_path = "output_hd.mp3";
46+
response.save(output_path)?;
47+
println!("HD audio saved to: {}", output_path);
48+
println!("Audio size: {} bytes", response.audio.len());
49+
50+
// Example 3: Different audio format
51+
println!("\n--- Example 3: Different Audio Format ---");
52+
let text = "Testing different audio formats.";
53+
54+
let request =
55+
SpeechRequest::new("openai/tts-1", text, "shimmer").with_format(AudioFormat::Opus);
56+
let response = client.speech(request).await?;
57+
58+
let output_path = "output_opus.opus";
59+
response.save(output_path)?;
60+
println!("Opus audio saved to: {}", output_path);
61+
println!("Format: {:?}", response.format);
62+
63+
// Example 4: Adjusted speech speed
64+
println!("\n--- Example 4: Adjusted Speech Speed ---");
65+
let text = "This speech is generated at a faster pace.";
66+
67+
let request = SpeechRequest::new("openai/tts-1", text, "echo").with_speed(1.25);
68+
let response = client.speech(request).await?;
69+
70+
let output_path = "output_fast.mp3";
71+
response.save(output_path)?;
72+
println!("Fast audio saved to: {}", output_path);
73+
74+
// Example 5: Slow speech
75+
println!("\n--- Example 5: Slow Speech ---");
76+
let text = "This speech is generated at a slower, more deliberate pace.";
77+
78+
let request = SpeechRequest::new("openai/tts-1", text, "fable").with_speed(0.8);
79+
let response = client.speech(request).await?;
80+
81+
let output_path = "output_slow.mp3";
82+
response.save(output_path)?;
83+
println!("Slow audio saved to: {}", output_path);
84+
85+
// Example 6: Multiple voices comparison
86+
println!("\n--- Example 6: Voice Comparison ---");
87+
let voices = ["alloy", "echo", "fable", "onyx", "nova", "shimmer"];
88+
let text = "The quick brown fox jumps over the lazy dog.";
89+
90+
for voice in voices {
91+
let request = SpeechRequest::new("openai/tts-1", text, voice);
92+
let response = client.speech(request).await?;
93+
94+
let output_path = format!("output_{}.mp3", voice);
95+
response.save(&output_path)?;
96+
println!(" Voice '{}': {} bytes", voice, response.audio.len());
97+
}
98+
99+
// Clean up example files
100+
println!("\n--- Cleanup ---");
101+
let files_to_clean = [
102+
"output_basic.mp3",
103+
"output_hd.mp3",
104+
"output_opus.opus",
105+
"output_fast.mp3",
106+
"output_slow.mp3",
107+
"output_alloy.mp3",
108+
"output_echo.mp3",
109+
"output_fable.mp3",
110+
"output_onyx.mp3",
111+
"output_nova.mp3",
112+
"output_shimmer.mp3",
113+
];
114+
115+
for file in &files_to_clean {
116+
if Path::new(file).exists() {
117+
std::fs::remove_file(file)?;
118+
println!("Cleaned up: {}", file);
119+
}
120+
}
121+
122+
println!("\nAudio synthesis examples completed!");
123+
124+
Ok(())
125+
}

0 commit comments

Comments
 (0)