-
Notifications
You must be signed in to change notification settings - Fork 0
Usage Guide
TenzinGayche edited this page Dec 15, 2025 · 1 revision
This guide provides practical examples for using the LangGraph Translation API, from basic translations to advanced workflows.
- Basic Translation
- Streaming Translation
- Glossary Extraction
- Standardization Workflow
- UCCA Generation
- Gloss Analysis
- Combo-Key Workflows
- Editor Comments
- Full Pipeline
# Basic translation: POST one Tibetan text to the /translate/single endpoint.
curl -X POST http://localhost:8001/translate/single \
  -H "Content-Type: application/json" \
  -d '{
    "text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
    "target_language": "english",
    "model_name": "claude-sonnet-4-20250514"
  }'

import requests
# Single translation via the Python requests library.
response = requests.post(
    "http://localhost:8001/translate/single",
    json={
        "text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)
result = response.json()
# Even a single text comes back wrapped in a "results" list.
print(result["results"][0]["translated_text"])
# "The bodhisattva generates compassion for all sentient beings."

import requests
# Batch translation: send several texts in one request.
texts = [
    "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦",
    "སΰΎΰ½Όΰ½ΰΌΰ½ΰΌΰ½ΰ½²ΰ½",
    "སΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½Ίΰ½ΰΌΰ½ΰ½Ό",
    "ཀེསΰΌΰ½’ΰ½ΰΌΰ½ΰΎ±ΰ½²ΰΌΰ½ΰΌΰ½’ΰ½Όΰ½£ΰΌΰ½ΰ½΄ΰΌΰ½ΰΎ±ΰ½²ΰ½ΰΌΰ½"
]
response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": texts,
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 2  # Process 2 at a time
    }
)
result = response.json()
for item in result["results"]:
    print(f"{item['original_text']} β {item['translated_text']}")

# Per-request terminology rules override the model's default word choices.
response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": ["ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦"],
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514",
        "user_rules": "Always translate ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ as 'bodhicitta', not 'mind of enlightenment'"
    }
)

import requests
import json

def stream_translation(texts: list, target_language: str):
    """Stream batch translations over SSE, printing progress as each batch finishes.

    Returns the accumulated list of per-item translation results.
    """
    response = requests.post(
        "http://localhost:8001/translate/stream",
        json={
            "texts": texts,
            "target_language": target_language,
            "model_name": "claude-sonnet-4-20250514",
            "batch_size": 2
        },
        stream=True
    )
    all_results = []
    for line in response.iter_lines():
        if not line:
            continue
        line = line.decode('utf-8')
        # SSE frames are prefixed with "data: "; skip keep-alives and other noise.
        if not line.startswith('data: '):
            continue
        event = json.loads(line[6:])
        if event['type'] == 'batch_completed':
            print(f"\nβ Batch complete ({len(event['batch_results'])} items)")
            for result in event['batch_results']:
                print(f" {result['original_text'][:20]}... β {result['translated_text'][:30]}...")
                all_results.append(result)
        elif event['type'] == 'completion':
            print(f"\nβ All done! {len(event['results'])} translations completed.")
    return all_results

# Usage
texts = [
    "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦",
    "སΰΎΰ½Όΰ½ΰΌΰ½ΰΌΰ½ΰ½²ΰ½",
    "སΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½Ίΰ½ΰΌΰ½ΰ½Ό",
    "ཀེསΰΌΰ½’ΰ½"
]
results = stream_translation(texts, "english")async function streamTranslation(texts, targetLanguage) {
const response = await fetch('/translate/stream', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
texts: texts,
target_language: targetLanguage,
model_name: 'claude-sonnet-4-20250514',
batch_size: 2
})
});
const reader = response.body.getReader();
const decoder = new TextDecoder();
let buffer = '';
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += decoder.decode(value, { stream: true });
const lines = buffer.split('\n');
buffer = lines.pop();
for (const line of lines) {
if (line.startsWith('data: ')) {
const event = JSON.parse(line.slice(6));
if (event.type === 'batch_completed') {
console.log('Batch:', event.batch_results);
// Update UI here
}
if (event.type === 'completion') {
console.log('Done:', event.results.length, 'translations');
}
}
}
}
}
// Usage
streamTranslation(
['ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦', 'སΰΎΰ½Όΰ½ΰΌΰ½ΰΌΰ½ΰ½²ΰ½'],
'english'
);import requests
# Step 1: Translate
translation_response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": ["ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌΰ½ΰ½’ΰΌΰ½ΰΎ±ΰΌ སΰΎΰ½Όΰ½ΰΌΰ½ΰΌΰ½ΰ½²ΰ½ΰΌΰ½£ΰΌΰ½ΰ½¦ΰΎ³ΰ½ΰΌΰ½ΰ½’ΰΌΰ½ΰΎ±ΰΌ"],
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)
translation_results = translation_response.json()["results"]

# Step 2: Extract glossary terms from the source/translation pairs.
glossary_response = requests.post(
    "http://localhost:8001/glossary/extract",
    json={
        "items": translation_results,
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 5
    }
)
glossary = glossary_response.json()
print("Extracted Terms:")
for term in glossary["terms"]:
    print(f" {term['source_term']} β {term['translated_term']}")

import requests
import json

def stream_glossary_extraction(items):
    """Stream glossary extraction over SSE, collecting terms as each batch finishes.

    Returns the accumulated list of extracted terms.
    """
    response = requests.post(
        "http://localhost:8001/glossary/extract/stream",
        json={
            "items": items,
            "model_name": "claude-sonnet-4-20250514",
            "batch_size": 3
        },
        stream=True
    )
    all_terms = []
    for line in response.iter_lines():
        if not line:
            continue
        line = line.decode('utf-8')
        # Only SSE "data: " frames carry event payloads.
        if not line.startswith('data: '):
            continue
        event = json.loads(line[6:])
        if event['type'] == 'glossary_batch_completed':
            print(f"Extracted {len(event['terms'])} terms")
            all_terms.extend(event['terms'])
        elif event['type'] == 'completion':
            print(f"Total: {len(event['glossary']['terms'])} unique terms")
    return all_terms

import requests
# Step 1: Translate multiple texts
texts = [
    "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌΰ½ΰ½’ΰΌΰ½ΰΎ±ΰΌ",
    "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½ΰΎ±ΰ½²ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌΰ½ΰ½’ΰΌΰ½ΰΎ±ΰ½ ΰ½ΌΰΌ",
    "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ"
]
trans_response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": texts,
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)
results = trans_response.json()["results"]

# Step 2: Extract glossary
glossary_response = requests.post(
    "http://localhost:8001/glossary/extract",
    json={"items": results}
)
glossary = glossary_response.json()

# Step 3: Create items with glossary for analysis
items_with_glossary = [
    {
        "original_text": r["original_text"],
        "translated_text": r["translated_text"],
        "glossary": glossary["terms"]
    }
    for r in results
]

# Step 4: Analyze for inconsistencies
analysis_response = requests.post(
    "http://localhost:8001/standardize/analyze",
    json={"items": items_with_glossary}
)
inconsistencies = analysis_response.json()["inconsistent_terms"]
print("Inconsistent terms found:")
for term, translations in inconsistencies.items():
    print(f" {term}: {translations}")

# Step 5: Apply standardization across all items.
standardization_response = requests.post(
    "http://localhost:8001/standardize/apply",
    json={
        "items": items_with_glossary,
        "standardization_pairs": [
            {
                "source_word": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦",
                "standardized_translation": "bodhicitta"
            }
        ],
        "model_name": "claude-sonnet-4-20250514"
    }
)
updated = standardization_response.json()["updated_items"]
print("\nStandardized translations:")
for item in updated:
    print(f" {item['translated_text']}")

import requests
import json

# Generate a UCCA semantic graph for a source text (with optional commentary).
response = requests.post(
    "http://localhost:8001/ucca/generate",
    json={
        "input_text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
        "commentary_1": "This verse describes the bodhisattva's practice.",
        "model_name": "claude-sonnet-4-20250514"
    }
)
result = response.json()
ucca_graph = result["ucca_graph"]
print("UCCA Graph:")
print(json.dumps(ucca_graph, indent=2, ensure_ascii=False))

import requests
import json

items = [
    {"input_text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ"},
    {"input_text": "སΰΎΰ½Όΰ½ΰΌΰ½ΰΌΰ½ΰ½²ΰ½ΰΌΰ½’ΰΎΰ½Όΰ½ΰ½¦ΰΌ"},
    {"input_text": "སΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰ½Όΰ½ΰΌ"}
]
# Stream UCCA generation for multiple items; progress arrives as SSE events.
response = requests.post(
    "http://localhost:8001/ucca/generate/stream",
    json={
        "items": items,
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 2
    },
    stream=True
)
for line in response.iter_lines():
    if line and line.decode().startswith('data: '):
        event = json.loads(line.decode()[6:])
        if event.get('type') == 'ucca_item_completed':
            print(f"Item {event['index']}: UCCA generated")

import requests
import json

# Generate a word-by-word gloss analysis for a source text.
response = requests.post(
    "http://localhost:8001/gloss/generate",
    json={
        "input_text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ",
        "model_name": "claude-sonnet-4-20250514"
    }
)
result = response.json()
print(f"Standardized: {result['standardized_text']}")
print(f"Note: {result['note']}")
print(f"Glossary: {result['glossary']}")
# The per-segment analysis is returned as a JSON string; decode it before use.
analysis = json.loads(result['analysis'])
print("\nWord Analysis:")
for item in analysis:
    print(f" {item['segment']}: {item['meaning']}")

import requests
# Minimal combo-key workflow: translate from the source text alone.
response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source",
        "input": {
            "source": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ",
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514"
    }
)
print(response.json()["translation"])

import requests
# First generate UCCA and Gloss
ucca_response = requests.post(
    "http://localhost:8001/ucca/generate",
    json={
        "input_text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
        "model_name": "claude-sonnet-4-20250514"
    }
)
ucca = ucca_response.json()["ucca_graph"]
gloss_response = requests.post(
    "http://localhost:8001/gloss/generate",
    json={
        "input_text": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
        "model_name": "claude-sonnet-4-20250514"
    }
)
gloss = {"glossary": gloss_response.json()["glossary"]}

# Now run workflow with all context
workflow_response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source+ucca+gloss",
        "input": {
            "source": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ",
            "ucca": ucca,
            "gloss": gloss,
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514"
    }
)
print(workflow_response.json()["translation"])

import requests
# Custom prompt workflow: placeholders like {source} are filled from "input".
response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source+commentaries1",
        "input": {
            "source": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦",
            "commentaries": [
                "This term refers to the awakened heart-mind dedicated to liberation."
            ],
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514",
        "custom_prompt": """You are a Buddhist scholar-translator.
Source Text: {source}
Commentary: {commentary1}
Translate the source text into {target_language}.
Use the commentary to inform your understanding.
Return only the translation, no explanation."""
    }
)
print(response.json()["translation"])

import requests
# Editor comment: ask the model to review terminology, grounded in references.
response = requests.post(
    "http://localhost:8001/editor/comment",
    json={
        "messages": [
            {"role": "user", "content": "Is 'bodhicitta' the right translation here?"},
            {"role": "assistant", "content": "I used 'mind of enlightenment' instead."},
            {"role": "user", "content": "@Comment Please review the terminology."}
        ],
        "references": [
            {
                "type": "glossary",
                "content": "ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ (bodhicitta): The mind of enlightenment; the altruistic aspiration to attain Buddhahood for the benefit of all sentient beings."
            },
            {
                "type": "commentary",
                "content": "Shantideva defines bodhicitta as the cause of all happiness."
            }
        ],
        "options": {
            "model_name": "gemini-2.5-pro"
        }
    }
)
result = response.json()
print(f"Comment: {result['comment_text']}")
print(f"Citations: {result['citations_used']}")

import requests
import json

# Streaming editor comment: tokens arrive incrementally as SSE events.
response = requests.post(
    "http://localhost:8001/editor/comment/stream",
    json={
        "messages": [
            {"role": "user", "content": "Review this translation @Comment"}
        ],
        "references": [
            {"type": "glossary", "content": "Standard terminology..."}
        ]
    },
    stream=True
)
full_comment = ""
for line in response.iter_lines():
    if line and line.decode().startswith('data: '):
        event = json.loads(line.decode()[6:])
        if event.get('type') == 'comment_delta':
            # Print each token as it arrives and accumulate the full comment.
            print(event['text'], end='', flush=True)
            full_comment += event['text']
        elif event.get('type') == 'completion':
            print("\n\nCitations:", event['citations_used'])

import requests
import json
def full_translation_pipeline(source_text: str, target_language: str = "english"):
"""
Complete pipeline:
1. Generate UCCA
2. Generate Gloss
3. Translate with full context
4. Extract glossary
5. Return all results
"""
model = "claude-sonnet-4-20250514"
base_url = "http://localhost:8001"
print("Step 1: Generating UCCA...")
ucca_resp = requests.post(
f"{base_url}/ucca/generate",
json={"input_text": source_text, "model_name": model}
)
ucca = ucca_resp.json().get("ucca_graph", {})
print(" β UCCA generated")
print("Step 2: Generating Gloss...")
gloss_resp = requests.post(
f"{base_url}/gloss/generate",
json={"input_text": source_text, "model_name": model}
)
gloss_data = gloss_resp.json()
gloss = {"glossary": gloss_data.get("glossary", {})}
print(" β Gloss generated")
print("Step 3: Translating with full context...")
workflow_resp = requests.post(
f"{base_url}/workflow/run",
json={
"combo_key": "source+ucca+gloss",
"input": {
"source": source_text,
"ucca": ucca,
"gloss": gloss,
"target_language": target_language
},
"model_name": model
}
)
translation = workflow_resp.json().get("translation", "")
print(" β Translation complete")
print("Step 4: Extracting final glossary...")
glossary_resp = requests.post(
f"{base_url}/glossary/extract",
json={
"items": [{
"original_text": source_text,
"translated_text": translation,
"metadata": {}
}],
"model_name": model
}
)
final_glossary = glossary_resp.json()
print(" β Glossary extracted")
return {
"source": source_text,
"translation": translation,
"ucca": ucca,
"gloss": gloss_data,
"glossary": final_glossary["terms"]
}
# Usage
result = full_translation_pipeline(
"ΰ½ΰΎ±ΰ½ΰΌΰ½ΰ½΄ΰ½ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½ΰ½ΰ½ ΰΌΰ½¦ΰ½Ίΰ½ΰ½¦ΰΌΰ½
ΰ½ΰΌΰ½ΰ½ΰ½¦ΰΌΰ½
ΰ½ΰΌΰ½£ΰΌΰ½¦ΰΎΰ½²ΰ½ΰΌΰ½’ΰΎΰ½ΊΰΌΰ½ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½ΰΌ"
)
print("\n" + "="*50)
print("RESULTS")
print("="*50)
print(f"\nSource: {result['source']}")
print(f"\nTranslation: {result['translation']}")
print(f"\nGlossary Terms:")
for term in result['glossary']:
print(f" {term['source_term']} β {term['translated_term']}")For batch translations, always prefer streaming endpoints to provide user feedback.
The API caches translations and glossaries. Clear cache if you need fresh results:
requests.post("http://localhost:8001/system/clear-cache")

- Use `batch_size=5` for balanced performance
- Larger batches are faster but less responsive
- Smaller batches provide more frequent updates
| Use Case | Recommended Model |
|---|---|
| Fast prototyping | gemini-2.5-flash |
| High quality | claude-sonnet-4-20250514 |
| Complex reasoning | gemini-2.5-pro |
| Buddhist-specific | dharamitra |
More context = better translations:
- Use UCCA for grammatically ambiguous texts
- Use commentaries for philosophical content
- Use Sanskrit for technical terminology
- API Reference - Complete endpoint docs
- Workflow System - Combo-key details
- Streaming Guide - SSE implementation
- Model Router - Model selection