The Problem
On Monday you tested the 3 prompts in ChatGPT. You saw how attribute extraction → template filling → SEO optimization works. But here's reality: you've got 847 products in your catalog with no descriptions. Your copywriter quotes you $50 per description. That's $42,350 and 6 weeks of work. Meanwhile, your competitors are ranking on Google and you're not.
The Code
Three levels: start simple for tens of products, add batch processing with rate limiting for hundreds, then scale to thousands with a full production pipeline. Pick the level that matches where you are.
Level 1: Simple Single Product Generator
Good for: 10-50 products | Setup time: 20 minutes
```python
# Simple Product Description Generator (10-50 products)
import json
from typing import Dict

import openai

openai.api_key = "sk-proj-your-actual-key-here"

def generate_product_description(product_data: Dict) -> Dict:
    """Chain the 3 prompts: extract → generate → optimize"""
    # Step 1: Extract and structure attributes
    extraction_prompt = f"""Extract product attributes from this data and format as structured JSON.
Include: product_name, category, price, materials, dimensions, colors, features,
target_audience, brand_voice, use_cases.

Product data: {json.dumps(product_data, indent=2)}

Output as valid JSON only."""

    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": extraction_prompt}],
        temperature=0.3
    )
    extracted = json.loads(response.choices[0].message.content)

    # Step 2: Generate SEO-optimized descriptions
    generation_prompt = f"""Generate product descriptions for e-commerce:
1. Short description (1-2 sentences, under 160 chars)
2. Long description (3-4 paragraphs, engaging and informative)
3. Bullet points (5-7 key features)
4. SEO keywords (7-10 relevant terms)
5. Meta title (under 60 chars)
6. Meta description (under 160 chars)

Product attributes: {json.dumps(extracted, indent=2)}
Brand voice: {extracted.get('brand_voice', 'professional and friendly')}

Output as JSON with keys: short_description, long_description, bullet_points,
seo_keywords, meta_title, meta_description"""

    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": generation_prompt}],
        temperature=0.7
    )
    descriptions = json.loads(response.choices[0].message.content)

    # Step 3: Optimize for search and conversion
    optimization_prompt = f"""Analyze this product description for SEO and conversion optimization:

Description: {descriptions['long_description']}
Keywords: {', '.join(descriptions['seo_keywords'])}

Provide:
1. Primary keyword (most important for ranking)
2. Secondary keywords (3-5 supporting terms)
3. Keyword density analysis
4. Readability score (0-100)
5. Sentiment (positive/neutral/negative)
6. Recommended call-to-action
7. Urgency elements to add
8. Trust signals to include

Output as JSON."""

    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": optimization_prompt}],
        temperature=0.3
    )
    optimization = json.loads(response.choices[0].message.content)

    return {
        "extracted_attributes": extracted,
        "descriptions": descriptions,
        "seo_optimization": optimization,
        "status": "complete"
    }

# Usage
product_input = {
    "name": "Organic Cotton Yoga Mat",
    "price": 89.99,
    "material": "100% organic cotton, natural rubber base",
    "dimensions": "72x24x5mm",
    "colors": ["Ocean Blue", "Forest Green", "Sunset Orange"],
    "features": "Non-slip, eco-friendly, biodegradable, includes strap"
}

result = generate_product_description(product_input)
print(f"Generated for: {result['extracted_attributes']['product_name']}")
print(f"Primary keyword: {result['seo_optimization']['primary_keyword']}")
print(f"\nShort description:\n{result['descriptions']['short_description']}")
```
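One caveat with the bare json.loads calls above: GPT-4 sometimes wraps its reply in markdown fences or adds a sentence of preamble, which raises json.JSONDecodeError. A minimal defensive parser, using only the standard library (parse_llm_json is a hypothetical helper, not part of the OpenAI SDK), might look like this:

```python
import json
import re

def parse_llm_json(raw: str) -> dict:
    """Best-effort extraction of a JSON object from an LLM reply.

    Handles the common failure modes: markdown code fences around
    the JSON, or explanatory text before/after the object.
    """
    # Strip markdown fences like ```json ... ```
    fenced = re.search(r"```(?:json)?\s*(.*?)```", raw, re.DOTALL)
    if fenced:
        raw = fenced.group(1)
    # Fall back to the outermost {...} span
    start, end = raw.find("{"), raw.rfind("}")
    if start == -1 or end == -1:
        raise ValueError(f"No JSON object found in: {raw[:100]!r}")
    return json.loads(raw[start:end + 1])

# Drop-in replacement for the json.loads calls above:
# extracted = parse_llm_json(response.choices[0].message.content)
```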
Level 2: Batch Processing with Templates
Good for: 50-500 products | Setup time: 2 hours
```typescript
// Batch Processing with Templates (50-500 products)
import Anthropic from '@anthropic-ai/sdk';
import * as fs from 'fs';

interface ProductData {
  name: string;
  price: number;
  attributes: Record<string, any>;
  category: string;
}

interface DescriptionResult {
  product_id: string;
  descriptions: any;
  seo: any;
  processing_time: number;
}

class DescriptionGenerator {
  private anthropic: Anthropic;
  private templates: Map<string, string>;
  private rateLimiter: RateLimiter;

  constructor(apiKey: string) {
    this.anthropic = new Anthropic({ apiKey });
    this.templates = new Map();
    this.rateLimiter = new RateLimiter(50, 60000); // 50 requests per minute
    this.loadTemplates();
  }

  private loadTemplates() {
    // Load category-specific templates
    this.templates.set('yoga', `
You're writing for wellness-conscious consumers who value sustainability.
Tone: Calm, inspiring, eco-focused.
Keywords to include: organic, eco-friendly, sustainable, natural.
`);
    this.templates.set('electronics', `
You're writing for tech-savvy buyers who want specifications.
Tone: Professional, detailed, feature-focused.
Keywords to include: specifications, performance, technology, features.
`);
    this.templates.set('fashion', `
You're writing for style-conscious shoppers who follow trends.
Tone: Trendy, aspirational, style-focused.
Keywords to include: style, fashion, trending, versatile.
`);
  }

  async generateBatch(products: ProductData[]): Promise<DescriptionResult[]> {
    const results: DescriptionResult[] = [];
    const batchSize = 10;

    for (let i = 0; i < products.length; i += batchSize) {
      const batch = products.slice(i, i + batchSize);
      const batchResults = await Promise.all(
        batch.map((product) => this.processProduct(product))
      );
      results.push(...batchResults);

      // Progress logging
      console.log(`Processed ${Math.min(i + batchSize, products.length)}/${products.length} products`);
    }

    return results;
  }

  private async processProduct(product: ProductData): Promise<DescriptionResult> {
    const startTime = Date.now();
    await this.rateLimiter.wait();

    try {
      // Get category template
      const categoryTemplate = this.templates.get(product.category) || '';

      // Generate description with retry logic
      const description = await this.retryWithBackoff(async () => {
        const response = await this.anthropic.messages.create({
          model: 'claude-3-5-sonnet-20241022',
          max_tokens: 2048,
          messages: [
            {
              role: 'user',
              content: `${categoryTemplate}

Generate complete product description JSON for:
${JSON.stringify(product, null, 2)}

Include: short_description, long_description, bullet_points, seo_keywords, meta_title, meta_description`,
            },
          ],
        });

        const content = response.content[0];
        if (content.type !== 'text') throw new Error('Invalid response');
        return JSON.parse(content.text);
      }, 3);

      // SEO optimization
      const seo = await this.optimizeForSEO(description, product);

      return {
        product_id: product.name,
        descriptions: description,
        seo,
        processing_time: Date.now() - startTime,
      };
    } catch (error) {
      console.error(`Failed to process ${product.name}:`, error);
      throw error;
    }
  }

  private async optimizeForSEO(description: any, product: ProductData): Promise<any> {
    await this.rateLimiter.wait();

    const response = await this.anthropic.messages.create({
      model: 'claude-3-5-sonnet-20241022',
      max_tokens: 1024,
      messages: [
        {
          role: 'user',
          content: `Analyze SEO for: ${description.long_description}
Keywords: ${description.seo_keywords.join(', ')}
Category: ${product.category}

Provide: primary_keyword, secondary_keywords, keyword_density, readability_score`,
        },
      ],
    });

    const content = response.content[0];
    if (content.type !== 'text') throw new Error('Invalid response');
    return JSON.parse(content.text);
  }

  private async retryWithBackoff<T>(
    fn: () => Promise<T>,
    maxRetries: number
  ): Promise<T> {
    for (let attempt = 0; attempt < maxRetries; attempt++) {
      try {
        return await fn();
      } catch (error) {
        if (attempt === maxRetries - 1) throw error;
        await new Promise((resolve) =>
          setTimeout(resolve, Math.pow(2, attempt) * 1000)
        );
      }
    }
    throw new Error('Max retries exceeded');
  }

  async saveResults(results: DescriptionResult[], outputPath: string) {
    const formatted = results.map((r) => ({
      product: r.product_id,
      short_description: r.descriptions.short_description,
      long_description: r.descriptions.long_description,
      bullets: r.descriptions.bullet_points,
      meta_title: r.descriptions.meta_title,
      meta_description: r.descriptions.meta_description,
      primary_keyword: r.seo.primary_keyword,
      processing_time_ms: r.processing_time,
    }));

    fs.writeFileSync(outputPath, JSON.stringify(formatted, null, 2));
    console.log(`Saved ${results.length} descriptions to ${outputPath}`);
  }
}

class RateLimiter {
  private tokens: number;
  private maxTokens: number;
  private refillRate: number;
  private lastRefill: number;

  constructor(maxTokens: number, refillInterval: number) {
    this.tokens = maxTokens;
    this.maxTokens = maxTokens;
    this.refillRate = maxTokens / refillInterval; // tokens per millisecond
    this.lastRefill = Date.now();
  }

  async wait() {
    this.refill();
    if (this.tokens < 1) {
      const waitTime = (1 - this.tokens) / this.refillRate;
      await new Promise((resolve) => setTimeout(resolve, waitTime));
      this.refill();
    }
    this.tokens -= 1;
  }

  private refill() {
    const now = Date.now();
    const timePassed = now - this.lastRefill;
    const tokensToAdd = timePassed * this.refillRate;
    this.tokens = Math.min(this.maxTokens, this.tokens + tokensToAdd);
    this.lastRefill = now;
  }
}

// Usage
const generator = new DescriptionGenerator(process.env.ANTHROPIC_API_KEY!);

const products: ProductData[] = [
  {
    name: 'Organic Cotton Yoga Mat',
    price: 89.99,
    category: 'yoga',
    attributes: { material: 'organic cotton', size: '72x24' },
  },
  // ... more products
];

const results = await generator.generateBatch(products);
await generator.saveResults(results, './descriptions.json');
console.log(`Generated ${results.length} descriptions`);
console.log(`Avg time: ${results.reduce((sum, r) => sum + r.processing_time, 0) / results.length}ms`);
```
Level 3: Production Pipeline with Shopify Integration
Good for: 500-5,000 products | Setup time: 1 day
```python
# Production Pipeline with Shopify Integration (500-5000 products)
import asyncio
import json
import logging
from concurrent.futures import ThreadPoolExecutor
from typing import TypedDict, List, Dict

import openai
import shopify
from langgraph.graph import Graph, END

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class ProductState(TypedDict):
    product_id: str
    raw_data: Dict
    extracted_attributes: Dict
    descriptions: Dict
    seo_optimization: Dict
    shopify_formatted: Dict
    status: str
    retry_count: int
    errors: List[str]

class DescriptionPipeline:
    def __init__(self, shopify_shop_url: str, shopify_token: str, openai_key: str):
        self.shopify_session = shopify.Session(shopify_shop_url, '2024-01', shopify_token)
        shopify.ShopifyResource.activate_session(self.shopify_session)
        openai.api_key = openai_key
        self.executor = ThreadPoolExecutor(max_workers=20)
        self.graph = self.build_pipeline()

    def extract_node(self, state: ProductState) -> ProductState:
        """Extract and structure product attributes"""
        try:
            response = openai.chat.completions.create(
                model="gpt-4",
                messages=[{
                    "role": "user",
                    "content": f"Extract product attributes as JSON: {state['raw_data']}"
                }],
                temperature=0.3
            )
            state['extracted_attributes'] = json.loads(response.choices[0].message.content)
            state['status'] = 'extracted'
            logger.info(f"Extracted: {state['product_id']}")
        except Exception as e:
            state['errors'].append(f"Extraction failed: {str(e)}")
            state['retry_count'] += 1
        return state

    def generate_node(self, state: ProductState) -> ProductState:
        """Generate descriptions with category-specific templates"""
        try:
            category = state['extracted_attributes'].get('category', 'general')
            template = self.get_category_template(category)
            response = openai.chat.completions.create(
                model="gpt-4",
                messages=[
                    {"role": "system", "content": template},
                    {
                        "role": "user",
                        "content": f"Generate complete descriptions: {json.dumps(state['extracted_attributes'])}"
                    }
                ],
                temperature=0.7
            )
            state['descriptions'] = json.loads(response.choices[0].message.content)
            state['status'] = 'generated'
            logger.info(f"Generated: {state['product_id']}")
        except Exception as e:
            state['errors'].append(f"Generation failed: {str(e)}")
            state['retry_count'] += 1
        return state

    def optimize_node(self, state: ProductState) -> ProductState:
        """SEO optimization and keyword analysis"""
        try:
            response = openai.chat.completions.create(
                model="gpt-4",
                messages=[{
                    "role": "user",
                    "content": f"""Optimize for SEO:
Description: {state['descriptions']['long_description']}
Keywords: {state['descriptions']['seo_keywords']}

Provide: primary_keyword, secondary_keywords, keyword_density, readability_score, cta"""
                }],
                temperature=0.3
            )
            state['seo_optimization'] = json.loads(response.choices[0].message.content)
            state['status'] = 'optimized'
            logger.info(f"Optimized: {state['product_id']}")
        except Exception as e:
            state['errors'].append(f"Optimization failed: {str(e)}")
            state['retry_count'] += 1
        return state

    def format_shopify_node(self, state: ProductState) -> ProductState:
        """Format for Shopify API"""
        try:
            state['shopify_formatted'] = {
                'product': {
                    'id': state['product_id'],
                    'body_html': state['descriptions']['long_description'],
                    'title': state['extracted_attributes']['product_name'],
                    'product_type': state['extracted_attributes']['category'],
                    'tags': ', '.join(state['descriptions']['seo_keywords']),
                    'metafields': [
                        {
                            'namespace': 'seo',
                            'key': 'meta_title',
                            'value': state['descriptions']['meta_title'],
                            'type': 'single_line_text_field'
                        },
                        {
                            'namespace': 'seo',
                            'key': 'meta_description',
                            'value': state['descriptions']['meta_description'],
                            'type': 'single_line_text_field'
                        }
                    ]
                }
            }
            state['status'] = 'formatted'
            logger.info(f"Formatted: {state['product_id']}")
        except Exception as e:
            state['errors'].append(f"Formatting failed: {str(e)}")
            state['retry_count'] += 1
        return state

    def upload_shopify_node(self, state: ProductState) -> ProductState:
        """Upload to Shopify"""
        try:
            product = shopify.Product.find(state['product_id'])
            product.body_html = state['shopify_formatted']['product']['body_html']
            product.tags = state['shopify_formatted']['product']['tags']

            # Update metafields
            for metafield_data in state['shopify_formatted']['product']['metafields']:
                metafield = shopify.Metafield(metafield_data)
                metafield.owner_id = state['product_id']
                metafield.owner_resource = 'product'
                metafield.save()

            product.save()
            state['status'] = 'uploaded'
            logger.info(f"Uploaded: {state['product_id']}")
        except Exception as e:
            state['errors'].append(f"Upload failed: {str(e)}")
            state['retry_count'] += 1
        return state

    def check_status(self, state: ProductState) -> str:
        """Route based on status and retries"""
        if state['status'] == 'uploaded':
            return "complete"
        elif state['retry_count'] >= 3:
            return "failed"
        elif len(state['errors']) > 0:
            return "retry"
        else:
            return "continue"

    def get_category_template(self, category: str) -> str:
        """Get category-specific writing template"""
        templates = {
            'yoga': "You're writing for wellness-conscious consumers. Tone: Calm, inspiring, eco-focused.",
            'electronics': "You're writing for tech buyers. Tone: Professional, detailed, spec-focused.",
            'fashion': "You're writing for style shoppers. Tone: Trendy, aspirational, style-focused.",
            'home': "You're writing for home decorators. Tone: Cozy, practical, design-focused."
        }
        return templates.get(category, "You're writing product descriptions. Tone: Professional, engaging.")

    def build_pipeline(self):
        """Build the LangGraph pipeline"""
        graph = Graph()

        # Add nodes
        graph.add_node("extract", self.extract_node)
        graph.add_node("generate", self.generate_node)
        graph.add_node("optimize", self.optimize_node)
        graph.add_node("format_shopify", self.format_shopify_node)
        graph.add_node("upload_shopify", self.upload_shopify_node)

        # Add edges
        graph.set_entry_point("extract")
        graph.add_edge("extract", "generate")
        graph.add_edge("generate", "optimize")
        graph.add_edge("optimize", "format_shopify")
        graph.add_edge("format_shopify", "upload_shopify")

        # Conditional routing
        graph.add_conditional_edges(
            "upload_shopify",
            self.check_status,
            {
                "complete": END,
                "failed": END,
                "retry": "extract",
                "continue": "upload_shopify"
            }
        )

        return graph.compile()

    async def process_batch(self, product_ids: List[str]) -> List[ProductState]:
        """Process multiple products in parallel"""
        # Create initial states
        tasks = []
        for product_id in product_ids:
            # Fetch product from Shopify
            product = shopify.Product.find(product_id)
            initial_state = {
                'product_id': product_id,
                'raw_data': product.to_dict(),
                'extracted_attributes': {},
                'descriptions': {},
                'seo_optimization': {},
                'shopify_formatted': {},
                'status': 'pending',
                'retry_count': 0,
                'errors': []
            }
            # Process through pipeline
            task = asyncio.create_task(self.process_single(initial_state))
            tasks.append(task)

        # Wait for all to complete
        results = await asyncio.gather(*tasks)

        # Log summary
        successful = len([r for r in results if r['status'] == 'uploaded'])
        failed = len([r for r in results if r['status'] == 'failed'])
        logger.info(f"Batch complete: {successful} successful, {failed} failed")

        return results

    async def process_single(self, state: ProductState) -> ProductState:
        """Process single product through pipeline"""
        return self.graph.invoke(state)

# Usage
pipeline = DescriptionPipeline(
    shopify_shop_url="your-store.myshopify.com",
    shopify_token="shpat_your_token",
    openai_key="sk-proj-your-key"
)

# Get all products without descriptions
products = shopify.Product.find(fields='id', body_html=None)
product_ids = [p.id for p in products]

# Process in batches of 50
batch_size = 50
for i in range(0, len(product_ids), batch_size):
    batch = product_ids[i:i+batch_size]
    results = asyncio.run(pipeline.process_batch(batch))
    print(f"Processed batch {i//batch_size + 1}: {len(results)} products")

print(f"Complete! Processed {len(product_ids)} products")
```
When to Level Up
Start: Single Product Generator
10-50 products
- Sequential API calls for individual products
- Basic error logging with print statements
- Manual review and editing of each description
- Copy-paste into Shopify admin
Scale: Batch Processing
50-500 products
- Parallel processing with rate limiting (50 req/min)
- Category-specific templates for consistent voice
- Automatic retries with exponential backoff
- CSV/JSON output for bulk import (CSV sketch after this list)
- Progress tracking and error reporting
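The Level 2 saveResults method covers the JSON half of that output. For the CSV half, a minimal Python sketch could flatten the saved JSON into a spreadsheet for bulk import; the file names and column choices here are assumptions, not a fixed format:

```python
import csv
import json

def descriptions_json_to_csv(json_path: str, csv_path: str) -> None:
    """Flatten the saved descriptions JSON into one CSV row per product."""
    with open(json_path) as f:
        rows = json.load(f)
    # Column names mirror the fields written by saveResults above
    fields = ["product", "short_description", "long_description",
              "meta_title", "meta_description", "primary_keyword"]
    with open(csv_path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=fields, extrasaction="ignore")
        writer.writeheader()
        writer.writerows(rows)

descriptions_json_to_csv("descriptions.json", "descriptions.csv")
```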
Production: Full Pipeline
500-5,000 products
- LangGraph workflow with conditional routing
- Direct Shopify API integration (auto-upload)
- SEO keyword analysis with SEMrush integration
- A/B testing different description variations
- Automated metafield updates for SEO
- Human review queue for edge cases
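The human review queue is the one piece the Level 3 code above doesn't show. One minimal way to gate uploads is a heuristic check before the upload node runs; the thresholds and the review_queue store below are illustrative assumptions, not part of the pipeline code:

```python
def needs_human_review(state: dict) -> bool:
    """Flag edge cases for manual review instead of auto-uploading.

    Illustrative thresholds -- tune them against your own catalog.
    """
    desc = state.get("descriptions", {})
    seo = state.get("seo_optimization", {})
    return (
        bool(state.get("errors"))                       # any step failed at least once
        or len(desc.get("long_description", "")) < 200  # suspiciously short copy
        or seo.get("readability_score", 100) < 50       # hard-to-read output
    )

# Inside upload_shopify_node, divert instead of uploading:
# if needs_human_review(state):
#     state["status"] = "pending_review"
#     review_queue.append(state)  # hypothetical store, e.g. a DB table
#     return state
```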
Enterprise: Multi-Store System
5,000+ products
- Multi-store support (Shopify, WooCommerce, BigCommerce)
- Multi-language generation (localization)
- Real-time inventory sync and description updates
- ML-powered performance tracking (conversion rates per description)
- Auto-regeneration based on poor performance
- Distributed processing with message queues (RabbitMQ; worker sketch after this list)
- Custom brand voice training on historical data
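For the message-queue item, a minimal RabbitMQ worker using the pika client might look like the sketch below. The queue name, message shape, and the make_initial_state helper are assumptions for illustration; each store would publish product IDs, and any number of workers would run the Level 3 pipeline against them:

```python
import json

import pika

# Sketch of a distributed worker: one queue of product IDs, N workers
# each running the description pipeline.
connection = pika.BlockingConnection(pika.ConnectionParameters("localhost"))
channel = connection.channel()
channel.queue_declare(queue="describe_products", durable=True)
channel.basic_qos(prefetch_count=1)  # one product per worker at a time

def handle_message(ch, method, properties, body):
    job = json.loads(body)
    try:
        # pipeline is the DescriptionPipeline from Level 3;
        # make_initial_state is a hypothetical helper building ProductState
        pipeline.graph.invoke(make_initial_state(job["product_id"]))
        ch.basic_ack(delivery_tag=method.delivery_tag)
    except Exception:
        # Requeue on failure; a real system would add a dead-letter queue
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)

channel.basic_consume(queue="describe_products", on_message_callback=handle_message)
channel.start_consuming()
```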
E-commerce-Specific Gotchas
The code examples work. But e-commerce has unique challenges you need to handle for production.
Duplicate Content Penalties from Google
If you generate near-identical descriptions for similar products, Google can treat them as duplicate content and suppress those pages in search results. You need variation: add product-specific details, vary sentence structures, and rotate synonyms (a rotation sketch follows the code below).
```python
def ensure_uniqueness(base_description: str, product_attributes: dict) -> str:
    """Add product-specific details to avoid duplicate content"""
    # Extract unique attributes
    unique_details = []
    if 'color' in product_attributes:
        unique_details.append(f"Available in {product_attributes['color']}")
    if 'size' in product_attributes:
        unique_details.append(f"Size: {product_attributes['size']}")
    if 'material' in product_attributes:
        unique_details.append(f"Made from {product_attributes['material']}")

    # Inject unique details into description
    sentences = base_description.split('. ')
    insert_position = len(sentences) // 2
    sentences.insert(insert_position, '. '.join(unique_details))
    return '. '.join(sentences)

# Check similarity before saving
from difflib import SequenceMatcher

def check_similarity(new_desc: str, existing_descs: list) -> float:
    """Return max similarity score (0-1)"""
    max_similarity = 0
    for existing in existing_descs:
        similarity = SequenceMatcher(None, new_desc, existing).ratio()
        max_similarity = max(max_similarity, similarity)
    return max_similarity

# Reject if too similar
if check_similarity(new_description, existing_descriptions) > 0.85:
    # Regenerate with more variation
    new_description = regenerate_with_variation(product_data)
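Synonym rotation, the third variation tactic, can be as simple as a deterministic per-product swap table. The synonym map below is a tiny illustrative sample, not a recommended vocabulary:

```python
import hashlib

# Tiny illustrative synonym map -- extend per category
SYNONYMS = {
    "durable": ["long-lasting", "hard-wearing", "built to last"],
    "comfortable": ["cozy", "supportive", "easy on the body"],
    "eco-friendly": ["sustainable", "planet-friendly", "earth-conscious"],
}

def rotate_synonyms(text: str, product_id: str) -> str:
    """Deterministically vary wording per product so near-identical
    items don't share identical sentences."""
    # Hash the product ID so the same product always gets the same variant
    seed = int(hashlib.md5(product_id.encode()).hexdigest(), 16)
    for word, options in SYNONYMS.items():
        replacement = options[seed % len(options)]
        text = text.replace(word, replacement)
    return text

print(rotate_synonyms("A durable, eco-friendly mat.", "sku-001"))
```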
SEO Keyword Stuffing vs Natural Language
LLMs can over-optimize and stuff keywords unnaturally. You need to balance SEO with readability. Aim for 1-2% keyword density max.
```typescript
function analyzeKeywordDensity(text: string, keywords: string[]): Record<string, number> {
  const words = text.toLowerCase().split(/\s+/);
  const totalWords = words.length;
  const density: Record<string, number> = {};

  keywords.forEach((keyword) => {
    const keywordWords = keyword.toLowerCase().split(/\s+/);
    let count = 0;

    // Count keyword occurrences
    for (let i = 0; i <= words.length - keywordWords.length; i++) {
      const slice = words.slice(i, i + keywordWords.length).join(' ');
      if (slice === keyword.toLowerCase()) {
        count++;
      }
    }

    density[keyword] = (count / totalWords) * 100;
  });

  return density;
}

// Validate before saving
function validateSEO(description: string, keywords: string[]): boolean {
  const density = analyzeKeywordDensity(description, keywords);

  // Check if any keyword exceeds 2% density
  for (const [keyword, percent] of Object.entries(density)) {
    if (percent > 2.0) {
      console.warn(`Keyword "${keyword}" density too high: ${percent.toFixed(2)}%`);
      return false;
    }
  }
  return true;
}

// Regenerate if keyword stuffing detected
if (!validateSEO(description, keywords)) {
  // Add instruction to reduce keyword usage
  description = await regenerate({
    ...productData,
    instruction: 'Use keywords naturally, max 1-2% density',
  });
}
```
Product Variant Descriptions (Size/Color Variations)
Don't generate separate descriptions for each size/color variant. Generate one master description and programmatically insert variant-specific details.
```python
def generate_variant_descriptions(master_product: dict, variants: list) -> list:
    """Generate descriptions for all variants from master"""
    # Generate master description once
    master_description = generate_description(master_product)

    variant_descriptions = []
    for variant in variants:
        # Clone master
        variant_desc = master_description.copy()

        # Customize for variant
        variant_details = []
        if 'color' in variant:
            variant_details.append(f"This {variant['color']} version")
        if 'size' in variant:
            variant_details.append(f"in size {variant['size']}")

        # Insert variant details
        variant_desc['short_description'] = f"{' '.join(variant_details)} - {variant_desc['short_description']}"

        # Update meta tags
        variant_desc['meta_title'] = f"{master_product['name']} - {variant['color']} {variant['size']}"

        variant_descriptions.append({
            'variant_id': variant['id'],
            'description': variant_desc
        })

    return variant_descriptions

# Usage
master = {'name': 'Organic Yoga Mat', 'features': [...]}
variants = [
    {'id': '001', 'color': 'Ocean Blue', 'size': 'Standard'},
    {'id': '002', 'color': 'Forest Green', 'size': 'Standard'},
    {'id': '003', 'color': 'Sunset Orange', 'size': 'Extra Long'}
]

all_descriptions = generate_variant_descriptions(master, variants)
print(f"Generated {len(all_descriptions)} variant descriptions from 1 master")
```
Shopify API Rate Limits (2 req/sec, burst 40)
Shopify allows 2 requests per second with a burst of 40. If you hit the limit, you get 429 errors. Implement token bucket rate limiting.
```typescript
class ShopifyRateLimiter {
  private tokens: number = 40; // Burst capacity
  private maxTokens: number = 40;
  private refillRate: number = 2; // 2 tokens per second
  private lastRefill: number = Date.now();

  async waitForToken(): Promise<void> {
    this.refill();
    if (this.tokens < 1) {
      const waitTime = (1 - this.tokens) / this.refillRate * 1000;
      await new Promise((resolve) => setTimeout(resolve, waitTime));
      this.refill();
    }
    this.tokens -= 1;
  }

  private refill(): void {
    const now = Date.now();
    const timePassed = (now - this.lastRefill) / 1000;
    const tokensToAdd = timePassed * this.refillRate;
    this.tokens = Math.min(this.maxTokens, this.tokens + tokensToAdd);
    this.lastRefill = now;
  }
}

// Usage with Shopify API
const rateLimiter = new ShopifyRateLimiter();

async function updateProduct(productId: string, description: string) {
  await rateLimiter.waitForToken();

  try {
    const response = await fetch(
      `https://your-store.myshopify.com/admin/api/2024-01/products/${productId}.json`,
      {
        method: 'PUT',
        headers: {
          'X-Shopify-Access-Token': process.env.SHOPIFY_TOKEN!,
          'Content-Type': 'application/json',
        },
        body: JSON.stringify({ product: { body_html: description } }),
      }
    );

    if (response.status === 429) {
      // Rate limit hit - wait and retry
      await new Promise((resolve) => setTimeout(resolve, 2000));
      return updateProduct(productId, description);
    }

    return response.json();
  } catch (error) {
    console.error(`Failed to update product ${productId}:`, error);
    throw error;
  }
}
```
Multi-Language Support for International Stores
If you sell internationally, you need descriptions in multiple languages. Don't just translate - localize for cultural context and local SEO keywords.
```python
import json

import openai

def generate_multilingual_descriptions(product: dict, target_languages: list) -> dict:
    """Generate culturally-adapted descriptions for each language"""
    descriptions = {}

    for lang in target_languages:
        # Get language-specific context
        context = get_language_context(lang)

        prompt = f"""Generate product description in {lang} for {context['market']}:

Product: {json.dumps(product)}

Cultural considerations:
- {context['cultural_notes']}
- Popular keywords: {context['seo_keywords']}
- Tone: {context['preferred_tone']}

Generate: short_description, long_description, bullet_points, local_seo_keywords"""

        response = openai.chat.completions.create(
            model="gpt-4",
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7
        )
        descriptions[lang] = json.loads(response.choices[0].message.content)

    return descriptions

def get_language_context(language: str) -> dict:
    """Get cultural and SEO context for language"""
    contexts = {
        'es': {
            'market': 'Spanish-speaking markets (Spain, Latin America)',
            'cultural_notes': 'Emphasize family values, quality, durability',
            'seo_keywords': ['orgánico', 'sostenible', 'natural'],
            'preferred_tone': 'warm, personal, family-oriented'
        },
        'de': {
            'market': 'German market',
            'cultural_notes': 'Emphasize precision, quality, engineering',
            'seo_keywords': ['qualität', 'nachhaltig', 'premium'],
            'preferred_tone': 'professional, detailed, quality-focused'
        },
        'ja': {
            'market': 'Japanese market',
            'cultural_notes': 'Emphasize craftsmanship, attention to detail',
            'seo_keywords': ['品質', '持続可能', '職人技'],
            'preferred_tone': 'respectful, detailed, quality-focused'
        }
    }
    # Languages without an entry fall back to the Spanish context
    return contexts.get(language, contexts['es'])

# Usage
multilingual = generate_multilingual_descriptions(
    product_data,
    target_languages=['es', 'de', 'ja', 'fr']
)

for lang, desc in multilingual.items():
    print(f"{lang}: {desc['short_description']}")
```
Cost Calculator
Manual Process
Limitations:
- 6 weeks delivery time
- Inconsistent tone across writers
- No SEO optimization
- Expensive for variants
- Doesn't scale for new products
Automated Process
Benefits:
- Complete in 2-3 hours (not 6 weeks)
- Consistent brand voice
- Built-in SEO optimization
- Easy to regenerate variants
- Scales to unlimited products
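The exact dollar figures depend on your catalog size and current model pricing, so a back-of-the-envelope calculator is more honest than fixed numbers. In this sketch, the $50/description rate comes from the copywriter quote in The Problem; the token count and per-token rate are placeholders you should replace with your provider's published pricing:

```python
def estimate_costs(num_products: int,
                   copywriter_rate: float = 50.0,      # $ per description, per the quote above
                   tokens_per_product: int = 3000,     # assumption: ~3 prompts of input + output
                   price_per_1k_tokens: float = 0.05): # placeholder rate -- check current pricing
    """Rough manual-vs-automated cost comparison."""
    manual = num_products * copywriter_rate
    automated = num_products * (tokens_per_product / 1000) * price_per_1k_tokens
    return manual, automated

manual, automated = estimate_costs(847)
print(f"Manual copywriting: ${manual:,.2f}")     # $42,350.00 at $50/description
print(f"API-generated:      ${automated:,.2f}")  # depends entirely on your actual rates
```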