
Usage Guide

This guide provides practical examples for using the LangGraph Translation API, from basic translations to advanced workflows.


📋 Table of Contents

  • Basic Translation
  • Streaming Translation
  • Glossary Extraction
  • Standardization Workflow
  • UCCA Generation
  • Gloss Analysis
  • Combo-Key Workflows
  • Editor Comments
  • Full Pipeline
  • Tips & Best Practices

🌐 Basic Translation

Single Text Translation

cURL

curl -X POST http://localhost:8001/translate/single \
  -H "Content-Type: application/json" \
  -d '{
    "text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
    "target_language": "english",
    "model_name": "claude-sonnet-4-20250514"
  }'

Python

import requests

response = requests.post(
    "http://localhost:8001/translate/single",
    json={
        "text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)

result = response.json()
print(result["results"][0]["translated_text"])
# "The bodhisattva generates compassion for all sentient beings."

Batch Translation

import requests

texts = [
    "བྱང་ཆུབ་སེམས",
    "སྟོང་པ་ཉིད",
    "སྙིང་རྗེ་ཆེན་པོ",
    "ཤེས་རབ་ཀྱི་ཕ་རོལ་ཏུ་ཕྱིན་པ"
]

response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": texts,
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 2  # Process 2 at a time
    }
)

result = response.json()
for item in result["results"]:
    print(f"{item['original_text']} β†’ {item['translated_text']}")

With Custom Rules

response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": ["ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦"],
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514",
        "user_rules": "Always translate ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ as 'bodhicitta', not 'mind of enlightenment'"
    }
)
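
The response uses the same results shape as the other translate calls, so a quick spot check that the preferred rendering came through can be added after the request. This is a sketch only; the model may still phrase the sentence differently:

result = response.json()["results"][0]
print(result["translated_text"])

# Optional sanity check that the custom rule was honoured.
if "bodhicitta" not in result["translated_text"].lower():
    print("Warning: expected 'bodhicitta' in the output")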

📑 Streaming Translation

Python with Real-Time Display

import requests
import json

def stream_translation(texts: list, target_language: str):
    response = requests.post(
        "http://localhost:8001/translate/stream",
        json={
            "texts": texts,
            "target_language": target_language,
            "model_name": "claude-sonnet-4-20250514",
            "batch_size": 2
        },
        stream=True
    )
    
    all_results = []
    
    for line in response.iter_lines():
        if not line:
            continue
        
        line = line.decode('utf-8')
        if not line.startswith('data: '):
            continue
        
        event = json.loads(line[6:])
        
        if event['type'] == 'batch_completed':
            print(f"\n✓ Batch complete ({len(event['batch_results'])} items)")
            for result in event['batch_results']:
                print(f"  {result['original_text'][:20]}... → {result['translated_text'][:30]}...")
                all_results.append(result)

        elif event['type'] == 'completion':
            print(f"\n✅ All done! {len(event['results'])} translations completed.")
    
    return all_results

# Usage
texts = [
    "བྱང་ཆུབ་སེམས",
    "སྟོང་པ་ཉིད",
    "སྙིང་རྗེ་ཆེན་པོ",
    "ཤེས་རབ"
]

results = stream_translation(texts, "english")

JavaScript with Fetch

async function streamTranslation(texts, targetLanguage) {
  const response = await fetch('/translate/stream', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      texts: texts,
      target_language: targetLanguage,
      model_name: 'claude-sonnet-4-20250514',
      batch_size: 2
    })
  });

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;

    buffer += decoder.decode(value, { stream: true });
    const lines = buffer.split('\n');
    buffer = lines.pop();

    for (const line of lines) {
      if (line.startsWith('data: ')) {
        const event = JSON.parse(line.slice(6));
        
        if (event.type === 'batch_completed') {
          console.log('Batch:', event.batch_results);
          // Update UI here
        }
        
        if (event.type === 'completion') {
          console.log('Done:', event.results.length, 'translations');
        }
      }
    }
  }
}

// Usage
streamTranslation(
  ['བྱང་ཆུབ་སེམས', 'སྟོང་པ་ཉིད'],
  'english'
);

📖 Glossary Extraction

Extract from Translation Results

import requests

# Step 1: Translate
translation_response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": ["ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ‹ΰ½”ΰ½’ΰΌ‹ΰ½–ΰΎ±ΰΌ ΰ½¦ΰΎŸΰ½Όΰ½„ΰΌ‹ΰ½”ΰΌ‹ΰ½‰ΰ½²ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½–ΰ½¦ΰΎ³ΰ½–ΰΌ‹ΰ½”ΰ½’ΰΌ‹ΰ½–ΰΎ±ΰΌ"],
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)

translation_results = translation_response.json()["results"]

# Step 2: Extract glossary
glossary_response = requests.post(
    "http://localhost:8001/glossary/extract",
    json={
        "items": translation_results,
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 5
    }
)

glossary = glossary_response.json()

print("Extracted Terms:")
for term in glossary["terms"]:
    print(f"  {term['source_term']} β†’ {term['translated_term']}")

Streaming Glossary Extraction

import requests
import json

def stream_glossary_extraction(items):
    response = requests.post(
        "http://localhost:8001/glossary/extract/stream",
        json={
            "items": items,
            "model_name": "claude-sonnet-4-20250514",
            "batch_size": 3
        },
        stream=True
    )
    
    all_terms = []
    
    for line in response.iter_lines():
        if not line:
            continue
        
        line = line.decode('utf-8')
        if not line.startswith('data: '):
            continue
        
        event = json.loads(line[6:])
        
        if event['type'] == 'glossary_batch_completed':
            print(f"Extracted {len(event['terms'])} terms")
            all_terms.extend(event['terms'])
        
        elif event['type'] == 'completion':
            print(f"Total: {len(event['glossary']['terms'])} unique terms")
    
    return all_terms
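
As with stream_translation, the helper takes the items returned by a translate call. For example, reusing translation_results from the previous example:

# Usage (assumes translation_results from the non-streaming example above)
terms = stream_glossary_extraction(translation_results)
for term in terms:
    print(term)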

⚖️ Standardization Workflow

Full Standardization Pipeline

import requests

# Step 1: Translate multiple texts
texts = [
    "བྱང་ཆུབ་སེམས་བསྐྱེད་པར་བྱ།",
    "བྱང་ཆུབ་ཀྱི་སེམས་སྐྱེད་པར་བྱའོ།",
    "བྱང་ཆུབ་སེམས་བསྐྱེད།"
]

trans_response = requests.post(
    "http://localhost:8001/translate",
    json={
        "texts": texts,
        "target_language": "english",
        "model_name": "claude-sonnet-4-20250514"
    }
)
results = trans_response.json()["results"]

# Step 2: Extract glossary
glossary_response = requests.post(
    "http://localhost:8001/glossary/extract",
    json={"items": results}
)
glossary = glossary_response.json()

# Step 3: Create items with glossary for analysis
items_with_glossary = [
    {
        "original_text": r["original_text"],
        "translated_text": r["translated_text"],
        "glossary": glossary["terms"]
    }
    for r in results
]

# Step 4: Analyze for inconsistencies
analysis_response = requests.post(
    "http://localhost:8001/standardize/analyze",
    json={"items": items_with_glossary}
)
inconsistencies = analysis_response.json()["inconsistent_terms"]

print("Inconsistent terms found:")
for term, translations in inconsistencies.items():
    print(f"  {term}: {translations}")

# Step 5: Apply standardization
standardization_response = requests.post(
    "http://localhost:8001/standardize/apply",
    json={
        "items": items_with_glossary,
        "standardization_pairs": [
            {
                "source_word": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦",
                "standardized_translation": "bodhicitta"
            }
        ],
        "model_name": "claude-sonnet-4-20250514"
    }
)

updated = standardization_response.json()["updated_items"]
print("\nStandardized translations:")
for item in updated:
    print(f"  {item['translated_text']}")

🧠 UCCA Generation

Generate UCCA Graph

import requests
import json

response = requests.post(
    "http://localhost:8001/ucca/generate",
    json={
        "input_text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
        "commentary_1": "This verse describes the bodhisattva's practice.",
        "model_name": "claude-sonnet-4-20250514"
    }
)

result = response.json()
ucca_graph = result["ucca_graph"]

print("UCCA Graph:")
print(json.dumps(ucca_graph, indent=2, ensure_ascii=False))

Batch UCCA with Streaming

import requests
import json

items = [
    {"input_text": "བྱང་ཆུབ་སེམས་བསྐྱེད།"},
    {"input_text": "སྟོང་པ་ཉིད་རྟོགས།"},
    {"input_text": "སྙིང་རྗེ་བསྒོམ།"}
]

response = requests.post(
    "http://localhost:8001/ucca/generate/stream",
    json={
        "items": items,
        "model_name": "claude-sonnet-4-20250514",
        "batch_size": 2
    },
    stream=True
)

for line in response.iter_lines():
    if line and line.decode().startswith('data: '):
        event = json.loads(line.decode()[6:])
        if event.get('type') == 'ucca_item_completed':
            print(f"Item {event['index']}: UCCA generated")

📝 Gloss Analysis

Generate Gloss

import requests
import json

response = requests.post(
    "http://localhost:8001/gloss/generate",
    json={
        "input_text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ",
        "model_name": "claude-sonnet-4-20250514"
    }
)

result = response.json()

print(f"Standardized: {result['standardized_text']}")
print(f"Note: {result['note']}")
print(f"Glossary: {result['glossary']}")

analysis = json.loads(result['analysis'])
print("\nWord Analysis:")
for item in analysis:
    print(f"  {item['segment']}: {item['meaning']}")

🔀 Combo-Key Workflows

Basic Workflow

import requests

response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source",
        "input": {
            "source": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ",
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514"
    }
)

print(response.json()["translation"])

With UCCA and Gloss

import requests

# First generate UCCA and Gloss
ucca_response = requests.post(
    "http://localhost:8001/ucca/generate",
    json={
        "input_text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
        "model_name": "claude-sonnet-4-20250514"
    }
)
ucca = ucca_response.json()["ucca_graph"]

gloss_response = requests.post(
    "http://localhost:8001/gloss/generate",
    json={
        "input_text": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
        "model_name": "claude-sonnet-4-20250514"
    }
)
gloss = {"glossary": gloss_response.json()["glossary"]}

# Now run workflow with all context
workflow_response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source+ucca+gloss",
        "input": {
            "source": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½‘ΰ½”ΰ½ ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½“ΰΌ‹ΰ½ΰ½˜ΰ½¦ΰΌ‹ΰ½…ΰ½‘ΰΌ‹ΰ½£ΰΌ‹ΰ½¦ΰΎ™ΰ½²ΰ½„ΰΌ‹ΰ½’ΰΎ—ΰ½ΊΰΌ‹ΰ½–ΰ½¦ΰΎΰΎ±ΰ½Ίΰ½‘ΰΌ",
            "ucca": ucca,
            "gloss": gloss,
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514"
    }
)

print(workflow_response.json()["translation"])

With Custom Prompt

import requests

response = requests.post(
    "http://localhost:8001/workflow/run",
    json={
        "combo_key": "source+commentaries1",
        "input": {
            "source": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦",
            "commentaries": [
                "This term refers to the awakened heart-mind dedicated to liberation."
            ],
            "target_language": "english"
        },
        "model_name": "claude-sonnet-4-20250514",
        "custom_prompt": """You are a Buddhist scholar-translator.

Source Text: {source}

Commentary: {commentary1}

Translate the source text into {target_language}. 
Use the commentary to inform your understanding.
Return only the translation, no explanation."""
    }
)

print(response.json()["translation"])

💬 Editor Comments

Generate Grounded Commentary

import requests

response = requests.post(
    "http://localhost:8001/editor/comment",
    json={
        "messages": [
            {"role": "user", "content": "Is 'bodhicitta' the right translation here?"},
            {"role": "assistant", "content": "I used 'mind of enlightenment' instead."},
            {"role": "user", "content": "@Comment Please review the terminology."}
        ],
        "references": [
            {
                "type": "glossary",
                "content": "ΰ½–ΰΎ±ΰ½„ΰΌ‹ΰ½†ΰ½΄ΰ½–ΰΌ‹ΰ½¦ΰ½Ίΰ½˜ΰ½¦ (bodhicitta): The mind of enlightenment; the altruistic aspiration to attain Buddhahood for the benefit of all sentient beings."
            },
            {
                "type": "commentary",
                "content": "Shantideva defines bodhicitta as the cause of all happiness."
            }
        ],
        "options": {
            "model_name": "gemini-2.5-pro"
        }
    }
)

result = response.json()
print(f"Comment: {result['comment_text']}")
print(f"Citations: {result['citations_used']}")

Streaming Editor Comment

import requests
import json

response = requests.post(
    "http://localhost:8001/editor/comment/stream",
    json={
        "messages": [
            {"role": "user", "content": "Review this translation @Comment"}
        ],
        "references": [
            {"type": "glossary", "content": "Standard terminology..."}
        ]
    },
    stream=True
)

full_comment = ""
for line in response.iter_lines():
    if line and line.decode().startswith('data: '):
        event = json.loads(line.decode()[6:])
        
        if event.get('type') == 'comment_delta':
            print(event['text'], end='', flush=True)
            full_comment += event['text']
        
        elif event.get('type') == 'completion':
            print("\n\nCitations:", event['citations_used'])

🔄 Full Pipeline

Complete Translation Pipeline

import requests
import json

def full_translation_pipeline(source_text: str, target_language: str = "english"):
    """
    Complete pipeline:
    1. Generate UCCA
    2. Generate Gloss
    3. Translate with full context
    4. Extract glossary
    5. Return all results
    """
    
    model = "claude-sonnet-4-20250514"
    base_url = "http://localhost:8001"
    
    print("Step 1: Generating UCCA...")
    ucca_resp = requests.post(
        f"{base_url}/ucca/generate",
        json={"input_text": source_text, "model_name": model}
    )
    ucca = ucca_resp.json().get("ucca_graph", {})
    print("  βœ“ UCCA generated")
    
    print("Step 2: Generating Gloss...")
    gloss_resp = requests.post(
        f"{base_url}/gloss/generate",
        json={"input_text": source_text, "model_name": model}
    )
    gloss_data = gloss_resp.json()
    gloss = {"glossary": gloss_data.get("glossary", {})}
    print("  βœ“ Gloss generated")
    
    print("Step 3: Translating with full context...")
    workflow_resp = requests.post(
        f"{base_url}/workflow/run",
        json={
            "combo_key": "source+ucca+gloss",
            "input": {
                "source": source_text,
                "ucca": ucca,
                "gloss": gloss,
                "target_language": target_language
            },
            "model_name": model
        }
    )
    translation = workflow_resp.json().get("translation", "")
    print("  βœ“ Translation complete")
    
    print("Step 4: Extracting final glossary...")
    glossary_resp = requests.post(
        f"{base_url}/glossary/extract",
        json={
            "items": [{
                "original_text": source_text,
                "translated_text": translation,
                "metadata": {}
            }],
            "model_name": model
        }
    )
    final_glossary = glossary_resp.json()
    print("  βœ“ Glossary extracted")
    
    return {
        "source": source_text,
        "translation": translation,
        "ucca": ucca,
        "gloss": gloss_data,
        "glossary": final_glossary["terms"]
    }

# Usage
result = full_translation_pipeline(
    "བྱང་ཆུབ་སེམས་དཔའ་སེམས་ཅན་ཐམས་ཅད་ལ་སྙིང་རྗེ་བསྐྱེད།"
)

print("\n" + "="*50)
print("RESULTS")
print("="*50)
print(f"\nSource: {result['source']}")
print(f"\nTranslation: {result['translation']}")
print(f"\nGlossary Terms:")
for term in result['glossary']:
    print(f"  {term['source_term']} β†’ {term['translated_term']}")

💡 Tips & Best Practices

1. Use Streaming for Long Operations

For batch translations, prefer the streaming endpoints so users get feedback as each batch completes.

2. Cache Results

The API caches translations and glossaries. Clear the cache if you need fresh results:

requests.post("http://localhost:8001/system/clear-cache")

3. Batch Appropriately

  • Use batch_size=5 for balanced performance (a simple heuristic is sketched below)
  • Larger batches are faster but less responsive
  • Smaller batches provide more frequent updates
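
A small helper along these lines is usually enough; this is a hypothetical heuristic, not part of the API:

def pick_batch_size(n_texts: int) -> int:
    # Hypothetical heuristic based on the trade-off above.
    if n_texts <= 5:
        return n_texts   # single batch, one progress update
    if n_texts <= 50:
        return 5         # the balanced default suggested above
    return 10            # favour throughput on large jobs

# e.g. json={"texts": texts, "batch_size": pick_batch_size(len(texts)), ...}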

4. Choose the Right Model

Recommended models by use case:

  • Fast prototyping: gemini-2.5-flash
  • High quality: claude-sonnet-4-20250514
  • Complex reasoning: gemini-2.5-pro
  • Buddhist-specific: dharamitra
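
If you switch models frequently, it can help to centralise the choice in one place. The names below are simply the ones from the list above; adjust them to whatever your deployment actually exposes:

import requests

# Hypothetical convenience mapping; model names taken from the list above.
MODELS = {
    "fast": "gemini-2.5-flash",
    "quality": "claude-sonnet-4-20250514",
    "reasoning": "gemini-2.5-pro",
    "buddhist": "dharamitra"
}

response = requests.post(
    "http://localhost:8001/translate/single",
    json={
        "text": "བྱང་ཆུབ་སེམས",
        "target_language": "english",
        "model_name": MODELS["quality"]
    }
)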

5. Provide Context

More context = better translations:

  • Use UCCA for grammatically ambiguous texts
  • Use commentaries for philosophical content
  • Use Sanskrit for technical terminology
