The Problem
On Monday you tested the 3 prompts in ChatGPT. You saw how template → variation → polish works. But here's the thing: you can't ask your team to copy-paste property details 100 times per day. They'll burn out, make mistakes, and you'll miss listing deadlines.
See It Work
Watch the 3 prompts chain together automatically. This is what you'll build.
The Code
Three levels: start simple, add reliability, then scale to production. Pick where you are.
Level 1: Simple API Calls
Good for: 0-50 listings/day | Setup time: 20 minutes
# Simple API Calls (0-50 listings/day)
import json
import os

from openai import OpenAI

# openai>=1.0 exposes chat.completions via a client object; the legacy
# module-level `openai.api_key = ...` setting is incompatible with the
# v1-style `chat.completions.create` call used below.
client = OpenAI(api_key=os.getenv('OPENAI_API_KEY'))


def _parse_model_json(raw: str):
    """Parse model output as JSON, tolerating ```json code fences."""
    text = raw.strip()
    if text.startswith('```'):
        text = text.strip('`').strip()
        if text.lower().startswith('json'):
            text = text[4:]
    return json.loads(text)


def automate_listing_generation(property_data: str) -> dict:
    """Chain the 3 prompts: extract → variations → optimize.

    Args:
        property_data: Raw listing text (e.g. an agent's shorthand notes).

    Returns:
        dict with keys: extracted, variations, optimized, ready_for_mls.
    """
    # Step 1: Extract and structure property data (low temperature for accuracy)
    extraction_prompt = f"""Extract property details from this listing text and format as JSON. Include: mls_number, property_type, bedrooms, bathrooms, square_feet, year_built, lot_size, price, location (city, state, neighborhood), features (list), highlights (list). Property text: {property_data} Output as valid JSON only."""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": extraction_prompt}],
        temperature=0.2
    )
    extracted = _parse_model_json(response.choices[0].message.content)

    # Step 2: Generate style variations (higher temperature for creativity)
    variation_prompt = f"""Generate 3 listing description variations for this property: 1. Luxury style (sophisticated, high-end language) 2. Family-friendly style (warm, welcoming, kid-focused) 3. Investor-focused style (ROI, market data, numbers) Property data: {json.dumps(extracted)} For each variation include: - style name - headline (10-15 words) - description (100-150 words) - call_to_action (one sentence) Output as JSON array."""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": variation_prompt}],
        temperature=0.7
    )
    variations = _parse_model_json(response.choices[0].message.content)

    # Step 3: Optimize for MLS (use luxury variation as base)
    optimize_prompt = f"""Optimize this listing description for MLS: Original: {variations[0]['description']} Requirements: - Maximum 300 characters for public remarks - Include key features and selling points - Use action words and emotional triggers - Ensure MLS compliance (no discriminatory language) - Add SEO keywords for online syndication Output as JSON with: - public_remarks (optimized text) - seo_keywords (array of 8-10 keywords) - character_count - mls_compliant (boolean) - readability_score (1-10) - emotional_triggers (array of phrases used)"""
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": optimize_prompt}],
        temperature=0.4
    )
    optimized = _parse_model_json(response.choices[0].message.content)

    return {
        "extracted": extracted,
        "variations": variations,
        "optimized": optimized,
        "ready_for_mls": optimized['mls_compliant']
    }


if __name__ == "__main__":
    # Usage
    property_text = """3BR colonial, 2400sf, renovated kitchen 2022, hardwood floors, finished basement, 0.5ac, quiet cul-de-sac, top schools, $749k, MLS#12345678, Boston MA"""
    result = automate_listing_generation(property_text)
    print(f"Generated {len(result['variations'])} variations")
    print(f"MLS ready: {result['ready_for_mls']}")
    print(f"\nOptimized listing ({result['optimized']['character_count']} chars):")
    print(result['optimized']['public_remarks'])
Level 2: Bulk Processing with Error Handling
Good for: 50-500 listings/day | Setup time: 2 hours
// Bulk Processing with Error Handling (50-500 listings/day) import Anthropic from '@anthropic-ai/sdk'; import * as fs from 'fs'; interface PropertyData { raw_text: string; mls_number?: string; } interface ListingResult { mls_number: string; extracted: any; variations: any[]; optimized: any; status: 'success' | 'failed'; error?: string; } class ListingAutomation { private anthropic: Anthropic; private maxRetries = 3; private batchSize = 10; constructor() { this.anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY!, }); } async processBatch(properties: PropertyData[]): Promise<ListingResult[]> { const results: ListingResult[] = []; // Process in batches to avoid rate limits for (let i = 0; i < properties.length; i += this.batchSize) { const batch = properties.slice(i, i + this.batchSize); const batchResults = await Promise.allSettled( batch.map((prop) => this.processProperty(prop)) ); batchResults.forEach((result, index) => { if (result.status === 'fulfilled') { results.push(result.value); } else { results.push({ mls_number: batch[index].mls_number || 'unknown', extracted: {}, variations: [], optimized: {}, status: 'failed', error: result.reason.message, }); } }); // Rate limiting: wait between batches if (i + this.batchSize < properties.length) { await this.delay(2000); } } return results; } private async processProperty(property: PropertyData): Promise<ListingResult> { try { // Step 1: Extract with retry const extracted = await this.retryWithBackoff(async () => { const response = await this.anthropic.messages.create({ model: 'claude-3-5-sonnet-20241022', max_tokens: 2048, messages: [ { role: 'user', content: `Extract property details as JSON: ${property.raw_text}`, }, ], }); const content = response.content[0]; if (content.type !== 'text') throw new Error('Invalid response'); return JSON.parse(content.text); }); // Step 2: Generate variations const variations = await this.retryWithBackoff(async () => { const response = await 
this.anthropic.messages.create({ model: 'claude-3-5-sonnet-20241022', max_tokens: 3072, messages: [ { role: 'user', content: `Generate 3 style variations (luxury, family, investor) for: ${JSON.stringify(extracted)}`, }, ], }); const content = response.content[0]; if (content.type !== 'text') throw new Error('Invalid response'); return JSON.parse(content.text); }); // Step 3: Optimize for MLS const optimized = await this.retryWithBackoff(async () => { const response = await this.anthropic.messages.create({ model: 'claude-3-5-sonnet-20241022', max_tokens: 1024, messages: [ { role: 'user', content: `Optimize for MLS (300 char max): ${variations[0].description}`, }, ], }); const content = response.content[0]; if (content.type !== 'text') throw new Error('Invalid response'); return JSON.parse(content.text); }); return { mls_number: extracted.mls_number || property.mls_number || 'unknown', extracted, variations, optimized, status: 'success', }; } catch (error) { throw new Error(`Failed to process property: ${error}`); } } private async retryWithBackoff<T>( fn: () => Promise<T>, retries = this.maxRetries ): Promise<T> { let lastError: Error | null = null; for (let attempt = 0; attempt < retries; attempt++) { try { return await Promise.race([ fn(), new Promise<never>((_, reject) => setTimeout(() => reject(new Error('Timeout')), 45000) ), ]); } catch (error) { lastError = error as Error; if (attempt < retries - 1) { const backoff = Math.pow(2, attempt) * 1000; await this.delay(backoff); } } } throw lastError; } private delay(ms: number): Promise<void> { return new Promise((resolve) => setTimeout(resolve, ms)); } async exportToCSV(results: ListingResult[], filename: string): Promise<void> { const csv = results.map((r) => ({ mls_number: r.mls_number, status: r.status, bedrooms: r.extracted.bedrooms || '', price: r.extracted.price || '', luxury_headline: r.variations[0]?.headline || '', optimized_text: r.optimized.public_remarks || '', character_count: 
r.optimized.character_count || 0, error: r.error || '', })); const header = Object.keys(csv[0]).join(','); const rows = csv.map((row) => Object.values(row).join(',')); const content = [header, ...rows].join('\n'); fs.writeFileSync(filename, content); } } // Usage const automation = new ListingAutomation(); const properties: PropertyData[] = [ { raw_text: '3BR colonial, 2400sf, $749k, Boston MA', mls_number: '12345678' }, { raw_text: '4BR ranch, 3200sf, $899k, Cambridge MA', mls_number: '87654321' }, // ... load from CSV or database ]; const results = await automation.processBatch(properties); const successCount = results.filter((r) => r.status === 'success').length; console.log(`Processed ${results.length} listings`); console.log(`Success: ${successCount}, Failed: ${results.length - successCount}`); await automation.exportToCSV(results, 'listings_output.csv');
Level 3: Production with MLS Integration
Good for: 500+ listings/day | Setup time: 1 day
# Production with MLS Integration (500+ listings/day)
import json
import logging
import os
from datetime import datetime, timezone
from typing import TypedDict, List

import openai
import redis
import requests
from langgraph.graph import Graph, END

logger = logging.getLogger(__name__)


class ListingState(TypedDict):
    # Pipeline state threaded through every graph node.
    property_data: dict
    extracted: dict
    variations: List[dict]
    optimized: dict
    mls_response: dict
    status: str
    retry_count: int


class MLSIntegration:
    """Handle MLS API interactions."""

    def __init__(self, api_key: str, api_url: str):
        self.api_key = api_key
        self.api_url = api_url
        # Redis caches successful pushes (1h TTL) for duplicate detection.
        self.cache = redis.Redis(host='localhost', port=6379, db=0)

    def push_listing(self, listing_data: dict) -> dict:
        """Push listing to MLS system; returns {'success': bool, ...}."""
        headers = {
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json'
        }
        # Format for MLS API (RESO standard)
        mls_payload = {
            'ListingKey': listing_data['mls_number'],
            'ListPrice': listing_data['price'],
            'BedroomsTotal': listing_data['bedrooms'],
            'BathroomsTotalInteger': listing_data['bathrooms'],
            'LivingArea': listing_data['square_feet'],
            'PublicRemarks': listing_data['public_remarks'],
            'PropertyType': listing_data['property_type'],
            'City': listing_data['location']['city'],
            'StateOrProvince': listing_data['location']['state'],
            # Timezone-aware timestamp; datetime.utcnow() is deprecated.
            'ModificationTimestamp': datetime.now(timezone.utc).isoformat(),
        }
        try:
            response = requests.post(
                f"{self.api_url}/listings",
                headers=headers,
                json=mls_payload,
                timeout=30
            )
            response.raise_for_status()
            # Cache successful push
            cache_key = f"listing:{listing_data['mls_number']}"
            self.cache.setex(cache_key, 3600, 'pushed')
            return {'success': True, 'listing_id': response.json()['id']}
        except requests.exceptions.RequestException as e:
            logger.error(f"MLS push failed: {e}")
            return {'success': False, 'error': str(e)}


def extract_node(state: ListingState) -> ListingState:
    """Extract property data."""
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{
            "role": "user",
            "content": f"Extract property details as JSON: {state['property_data']}"
        }],
        temperature=0.2
    )
    state['extracted'] = json.loads(response.choices[0].message.content)
    return state


def generate_variations_node(state: ListingState) -> ListingState:
    """Generate style variations."""
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{
            "role": "user",
            "content": f"Generate 3 variations for: {json.dumps(state['extracted'])}"
        }],
        temperature=0.7
    )
    state['variations'] = json.loads(response.choices[0].message.content)
    return state


def optimize_node(state: ListingState) -> ListingState:
    """Optimize for MLS."""
    response = openai.chat.completions.create(
        model="gpt-4",
        messages=[{
            "role": "user",
            "content": f"Optimize for MLS (300 char): {state['variations'][0]['description']}"
        }],
        temperature=0.4
    )
    state['optimized'] = json.loads(response.choices[0].message.content)
    return state


def mls_push_node(state: ListingState) -> ListingState:
    """Push to MLS system."""
    mls = MLSIntegration(
        api_key=os.getenv('MLS_API_KEY'),
        api_url=os.getenv('MLS_API_URL')
    )
    listing_data = {
        **state['extracted'],
        'public_remarks': state['optimized']['public_remarks']
    }
    result = mls.push_listing(listing_data)
    state['mls_response'] = result
    state['status'] = 'success' if result['success'] else 'failed'
    return state


def check_mls_success(state: ListingState) -> str:
    """Route based on MLS push result: retry failed pushes up to 3 times."""
    if state['status'] == 'success':
        return "complete"
    elif state['retry_count'] >= 3:
        logger.error(f"Max retries reached for {state['extracted'].get('mls_number')}")
        return "complete"
    else:
        state['retry_count'] += 1
        return "retry"


def build_listing_graph():
    """Build production listing automation graph:
    extract → variations → optimize → MLS push (with retry loop)."""
    graph = Graph()

    # Add nodes
    graph.add_node("extract", extract_node)
    graph.add_node("generate_variations", generate_variations_node)
    graph.add_node("optimize", optimize_node)
    graph.add_node("push_to_mls", mls_push_node)

    # Add edges
    graph.set_entry_point("extract")
    graph.add_edge("extract", "generate_variations")
    graph.add_edge("generate_variations", "optimize")
    graph.add_edge("optimize", "push_to_mls")
    graph.add_conditional_edges(
        "push_to_mls",
        check_mls_success,
        {
            "complete": END,
            "retry": "push_to_mls"
        }
    )

    return graph.compile()


if __name__ == "__main__":
    # Usage
    listing_graph = build_listing_graph()

    initial_state = {
        "property_data": "3BR colonial, 2400sf, $749k, Boston MA",
        "extracted": {},
        "variations": [],
        "optimized": {},
        "mls_response": {},
        "status": "pending",
        "retry_count": 0
    }

    result = listing_graph.invoke(initial_state)
    print(f"Listing status: {result['status']}")
    if result['status'] == 'success':
        print(f"MLS Listing ID: {result['mls_response']['listing_id']}")
When to Level Up
Start: Simple API Calls
0-50 listings/day
- Sequential API calls for single listings
- Basic console logging for debugging
- Manual review of each generated listing
- Copy-paste to MLS manually
Scale: Batch Processing
50-500 listings/day
- Bulk processing with batching (10 at a time)
- Automatic retries with exponential backoff
- CSV import/export for listing data
- Error tracking and failed listing reports
- Rate limiting to avoid API throttling
Production: MLS Integration
500-1,000 listings/day
- Direct MLS API integration (RESO standard)
- LangGraph workflow with conditional routing
- Redis caching for duplicate detection
- Automated quality checks before MLS push
- Webhook notifications for listing status
Enterprise: Multi-Platform System
1,000+ listings/day
- Multi-MLS syndication (push to 5+ platforms simultaneously)
- A/B testing for listing descriptions (track which styles convert)
- Image generation and optimization (AI-generated staging photos)
- SEO optimization and keyword tracking across platforms
- Real-time analytics dashboard (views, leads, conversions by listing)
- CRM integration (Salesforce, HubSpot) for lead routing
Real Estate-Specific Gotchas
The code examples above work. But real estate has unique challenges you need to handle.
Fair Housing Compliance - No Discriminatory Language
The federal Fair Housing Act prohibits discriminatory language in housing advertising — listings can't reference protected classes such as race, religion, familial status, or disability. LLMs sometimes generate phrases like 'perfect for young families' or 'great for retirees', which can violate the FHA, so you need automated compliance checking.
import re

# Phrase patterns that commonly trip Fair Housing Act review.
FORBIDDEN_PHRASES = [
    r'\b(young|old|elderly|senior|retired)\s+(family|families|couple|person)',
    r'\b(perfect|ideal|great)\s+for\s+(families|singles|couples)',
    r'\b(master|maid|handicap)\s+(bedroom|suite|accessible)',
    r'\b(church|temple|mosque|synagogue)\s+(nearby|walking distance)',
    r'\b(adults? only|no (children|kids))\b',
]

# Compile once at import time so per-listing scans stay cheap.
_COMPILED_PATTERNS = [re.compile(p, re.IGNORECASE) for p in FORBIDDEN_PHRASES]


def check_fair_housing_compliance(text: str) -> dict:
    """Scan listing text for potential Fair Housing violations.

    Args:
        text: Listing copy to scan.

    Returns:
        dict with:
            compliant  (bool): True when no forbidden phrase matched.
            violations (list): one dict per match with the matched phrase,
                its character position, and a reason string.
    """
    violations = [
        {
            'phrase': match.group(0),
            'position': match.start(),
            'reason': 'Potential Fair Housing violation'
        }
        for pattern in _COMPILED_PATTERNS
        for match in pattern.finditer(text)
    ]
    return {
        'compliant': not violations,
        'violations': violations
    }


if __name__ == "__main__":
    # Use before publishing (original demo referenced an undefined variable).
    listing_text = "Charming home, perfect for families, near top schools"
    result = check_fair_housing_compliance(listing_text)
    if not result['compliant']:
        print(f"WARNING: {len(result['violations'])} violations found")
        # Auto-regenerate or flag for manual review
MLS Character Limits - Strict Truncation Rules
Most MLS systems have strict character limits: 300 for public remarks, 50 for headlines. But you can't just truncate mid-sentence. Need smart truncation that preserves meaning and adds ellipsis properly.
function smartTruncate(text: string, maxLength: number): string { if (text.length <= maxLength) return text; // Try to break at sentence boundary const sentences = text.match(/[^.!?]+[.!?]+/g) || []; let truncated = ''; for (const sentence of sentences) { if ((truncated + sentence).length <= maxLength - 3) { truncated += sentence; } else { break; } } // If no complete sentences fit, break at word boundary if (!truncated) { const words = text.split(' '); while (words.length > 0) { const attempt = words.join(' '); if (attempt.length <= maxLength - 3) { truncated = attempt; break; } words.pop(); } } return truncated.trim() + '...'; } // Usage const longDescription = generateListing(propertyData); const mlsCompliant = smartTruncate(longDescription, 300); console.log(`Truncated from ${longDescription.length} to ${mlsCompliant.length} chars`);
Property Type Mapping - MLS vs LLM Terminology
LLMs use casual terms ('colonial', 'ranch'), but MLS systems require specific codes ('SFR', 'CON', 'RAN'). Need mapping tables and validation to ensure data integrity.
# Casual LLM terminology → MLS property-type codes.
PROPERTY_TYPE_MAPPING = {
    'colonial': 'CON',
    'ranch': 'RAN',
    'cape cod': 'CAP',
    'contemporary': 'CTM',
    'victorian': 'VIC',
    'condo': 'CDO',
    'townhouse': 'TWN',
    'multi-family': 'MFH',
    'single family': 'SFR',
    'land': 'LND',
}


def normalize_property_type(llm_type: str) -> str:
    """Convert a free-form LLM property-type label to an MLS code.

    Resolution order: exact (case-insensitive) match, then substring
    fuzzy match in dict insertion order, then the 'SFR' fallback.

    Args:
        llm_type: Property type as emitted by the LLM (e.g. "Colonial").

    Returns:
        Three-letter MLS property type code; 'SFR' when unrecognized.
    """
    llm_type_lower = llm_type.lower().strip()

    # Direct match
    if llm_type_lower in PROPERTY_TYPE_MAPPING:
        return PROPERTY_TYPE_MAPPING[llm_type_lower]

    # Fuzzy match (handle variations like "luxury victorian home")
    for key, code in PROPERTY_TYPE_MAPPING.items():
        if key in llm_type_lower or llm_type_lower in key:
            return code

    # Default fallback
    return 'SFR'


if __name__ == "__main__":
    # Usage in extraction step (the original demo called undefined helpers).
    extracted = {'property_type': 'Colonial'}
    extracted['property_type_code'] = normalize_property_type(
        extracted['property_type']
    )
    print(extracted['property_type_code'])  # CON
Image URL Validation - Broken Links Kill Listings
Generated listings often reference property images, but URLs break, expire, or point to wrong properties. Need validation before MLS push to avoid rejected listings.
import axios from 'axios'; import sharp from 'sharp'; interface ImageValidation { url: string; valid: boolean; width?: number; height?: number; size_kb?: number; error?: string; } async function validatePropertyImages( imageUrls: string[] ): Promise<ImageValidation[]> { const results = await Promise.all( imageUrls.map(async (url) => { try { const response = await axios.get(url, { responseType: 'arraybuffer', timeout: 10000, maxContentLength: 10 * 1024 * 1024, // 10MB max }); const buffer = Buffer.from(response.data); const metadata = await sharp(buffer).metadata(); // MLS requirements: min 800px wide, max 5MB const valid = metadata.width! >= 800 && buffer.length <= 5 * 1024 * 1024; return { url, valid, width: metadata.width, height: metadata.height, size_kb: Math.round(buffer.length / 1024), error: valid ? undefined : 'Does not meet MLS requirements', }; } catch (error) { return { url, valid: false, error: `Failed to load: ${error}`, }; } }) ); return results; } // Usage before MLS push const imageValidation = await validatePropertyImages(listing.image_urls); const validImages = imageValidation.filter((img) => img.valid); if (validImages.length < 3) { throw new Error('Insufficient valid images for MLS listing'); }
Price Formatting - Regional Variations
Different regions format prices differently. US uses $749,000, UK uses £749,000, EU uses €749.000. LLMs sometimes mix formats. Need standardization based on property location.
import re

# Currency per region code.
REGION_CURRENCY = {
    'US': 'USD',
    'CA': 'CAD',
    'GB': 'GBP',
    'AU': 'AUD',
    'NZ': 'NZD',
    'EU': 'EUR',
}

# Babel locale per region. The original built f'en_{country_code}', which
# produces the invalid locale 'en_EU' and raises UnknownLocaleError for
# European listings; 'EU' maps to a generic English locale instead.
REGION_LOCALE = {
    'US': 'en_US',
    'CA': 'en_CA',
    'GB': 'en_GB',
    'AU': 'en_AU',
    'NZ': 'en_NZ',
    'EU': 'en',
}


def format_listing_price(price: float, country_code: str) -> str:
    """Format price according to regional standards.

    Args:
        price: Numeric price value.
        country_code: Two-letter region code (see REGION_CURRENCY).

    Returns:
        Price string with the proper currency symbol and separators.
    """
    # Imported locally so callers that only need parsing don't require babel.
    from babel.numbers import format_currency

    currency = REGION_CURRENCY.get(country_code, 'USD')
    return format_currency(
        price,
        currency,
        locale=REGION_LOCALE.get(country_code, 'en_US')
    )


def parse_price_from_text(text: str) -> float:
    """Extract a numeric price from LLM output; 0.0 when unparseable.

    NOTE(review): this strips everything but digits and '.', so a
    European thousands-dot price like '€749.000' parses as 749.0 —
    confirm upstream prompts pin US-style formatting before relying
    on this for EU listings.
    """
    cleaned = re.sub(r'[^0-9.]', '', text)
    try:
        return float(cleaned)
    except ValueError:
        return 0.0


if __name__ == "__main__":
    # Usage
    llm_price = "$749,000"  # From LLM
    numeric_price = parse_price_from_text(llm_price)
    formatted_price = format_listing_price(numeric_price, 'US')
    print(f"Standardized: {formatted_price}")  # $749,000.00
Cost Calculator
Manual Process
Limitations:
- Limited to 10-15 listings per day
- High error rate from manual data entry
- Inconsistent quality across listings
- No A/B testing or optimization
Automated Process
Benefits:
- ✓ Process 50+ listings in 30 minutes
- ✓ Consistent quality and formatting
- ✓ Automatic Fair Housing compliance
- ✓ A/B testing for better conversions