The Problem
On Monday you tested the 3 prompts in ChatGPT. You saw how goal extraction → progress validation → insight generation works. But here's the reality: your OKRs live in a deck from January. Your team updates a spreadsheet once a month. By the time you realize you're off track, the quarter's over. You can't ask your strategy team to manually check 47 goals across 8 departments every week.
See It Work
Watch the 3 prompts chain together automatically. This is what you'll build.
The Code
Three levels: start simple, add real-time updates, then scale to full dashboards. Pick where you are.
Level 1: Simple API Calls
Good for: Weekly manual updates | Setup time: 30 minutes
```typescript
// Simple OKR Tracking (Weekly manual updates)
import Anthropic from '@anthropic-ai/sdk';

interface OKRAnalysis {
  objectives: any[];
  risks: any[];
  insights: any[];
  recommended_actions: any[];
}

async function analyzeOKRProgress(
  okrText: string,
  currentData: string
): Promise<OKRAnalysis> {
  const anthropic = new Anthropic({
    apiKey: process.env.ANTHROPIC_API_KEY!,
  });

  // Step 1: Extract and structure OKRs
  const extractionPrompt = `Extract OKRs and current progress from this text and format as JSON.
Include: objectives (with id, title, key_results array), quarter, week, total_weeks.
For each key result: id, description, target, current, unit, progress_percent, status.

OKR text: ${okrText}
Current data: ${currentData}

Output as valid JSON only.`;

  const extractResponse = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20241022',
    max_tokens: 2048,
    messages: [{ role: 'user', content: extractionPrompt }],
  });

  const content = extractResponse.content[0];
  if (content.type !== 'text') throw new Error('Invalid response');
  const extracted = JSON.parse(content.text);

  // Step 2: Validate progress and identify risks
  const validationPrompt = `Analyze this OKR progress data and identify risks.
Return JSON with: overall_health, on_track_count, at_risk_count, risks array.
For each risk: kr_id, description, severity, reason, recommended_action.

OKR data: ${JSON.stringify(extracted, null, 2)}`;

  const validationResponse = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20241022',
    max_tokens: 2048,
    messages: [{ role: 'user', content: validationPrompt }],
  });

  const validationContent = validationResponse.content[0];
  if (validationContent.type !== 'text') throw new Error('Invalid response');
  const validation = JSON.parse(validationContent.text);

  // Step 3: Generate strategic insights
  const insightPrompt = `Generate strategic insights and recommendations based on this OKR analysis.
Return JSON with: executive_summary, key_insights array, recommended_actions array, forecast.

OKR progress: ${JSON.stringify(extracted, null, 2)}
Risks: ${JSON.stringify(validation, null, 2)}`;

  const insightResponse = await anthropic.messages.create({
    model: 'claude-3-5-sonnet-20241022',
    max_tokens: 2048,
    messages: [{ role: 'user', content: insightPrompt }],
  });

  const insightContent = insightResponse.content[0];
  if (insightContent.type !== 'text') throw new Error('Invalid response');
  const insights = JSON.parse(insightContent.text);

  return {
    objectives: extracted.objectives,
    risks: validation.risks,
    insights: insights.key_insights,
    recommended_actions: insights.recommended_actions,
  };
}

// Usage
const okrText = `Q3 2025 OKRs:
Objective: Increase product adoption
KR1: Reach 50,000 active users...`;

const currentData = `Week 8 of 13:
Active users: 41,500...`;

const analysis = await analyzeOKRProgress(okrText, currentData);
console.log(`Found ${analysis.risks.length} risks`);
console.log(`Generated ${analysis.recommended_actions.length} actions`);
```
Level 2: With Real-Time Data Integration
Good for: Daily automated updates | Setup time: 2 hours
```python
# Real-Time OKR Tracking (Daily automated updates)
import os
import json
from datetime import datetime

import requests
from anthropic import Anthropic
from notion_client import Client as NotionClient


class OKRTracker:
    def __init__(self):
        self.anthropic = Anthropic(api_key=os.getenv('ANTHROPIC_API_KEY'))
        self.notion = NotionClient(auth=os.getenv('NOTION_API_KEY'))
        self.analytics_api_key = os.getenv('ANALYTICS_API_KEY')

    def fetch_current_metrics(self) -> dict:
        """Pull real-time data from analytics APIs"""
        # Example: Fetch from Google Analytics, Mixpanel, etc.
        headers = {'Authorization': f'Bearer {self.analytics_api_key}'}

        # Active users from analytics
        users_response = requests.get(
            'https://api.analytics.com/v1/users/active',
            headers=headers
        )
        active_users = users_response.json()['count']

        # Activation rate from product analytics
        activation_response = requests.get(
            'https://api.analytics.com/v1/activation/rate',
            headers=headers
        )
        activation_rate = activation_response.json()['rate']

        # Churn from subscription data
        churn_response = requests.get(
            'https://api.analytics.com/v1/churn/current',
            headers=headers
        )
        churn_rate = churn_response.json()['churn_percent']

        return {
            'active_users': active_users,
            'activation_rate': activation_rate,
            'churn_rate': churn_rate,
            'timestamp': datetime.now().isoformat()
        }

    def fetch_okrs_from_notion(self, database_id: str) -> list:
        """Pull OKRs from Notion database"""
        results = self.notion.databases.query(database_id=database_id)
        okrs = []
        for page in results['results']:
            props = page['properties']
            okrs.append({
                'id': page['id'],
                'objective': props['Objective']['title'][0]['text']['content'],
                'kr_description': props['Key Result']['rich_text'][0]['text']['content'],
                'target': props['Target']['number'],
                'unit': props['Unit']['select']['name']
            })
        return okrs

    def analyze_with_ai(self, okrs: list, current_metrics: dict) -> dict:
        """Run AI analysis on current progress"""
        prompt = f"""Analyze OKR progress and generate insights.

OKRs: {json.dumps(okrs, indent=2)}
Current metrics: {json.dumps(current_metrics, indent=2)}

Provide JSON with:
- progress_summary
- overall_health (on_track/at_risk/off_track)
- risks (array with kr_id, severity, reason, recommended_action)
- insights (array with insight, implication, confidence)
- recommended_actions (array with priority, action, owner)
"""
        response = self.anthropic.messages.create(
            model='claude-3-5-sonnet-20241022',
            max_tokens=3072,
            messages=[{'role': 'user', 'content': prompt}]
        )
        return json.loads(response.content[0].text)

    def update_notion_dashboard(self, database_id: str, analysis: dict):
        """Write analysis back to Notion"""
        # Create new page in dashboard database
        self.notion.pages.create(
            parent={'database_id': database_id},
            properties={
                'Date': {'date': {'start': datetime.now().isoformat()}},
                'Summary': {'rich_text': [{'text': {'content': analysis['progress_summary']}}]},
                'Risk Count': {'number': len(analysis['risks'])},
                'Status': {'select': {'name': analysis.get('overall_health', 'unknown')}}
            },
            children=[
                {
                    'object': 'block',
                    'type': 'heading_2',
                    'heading_2': {'rich_text': [{'text': {'content': 'Risks'}}]}
                },
                *[
                    {
                        'object': 'block',
                        'type': 'bulleted_list_item',
                        'bulleted_list_item': {
                            'rich_text': [{'text': {'content': f"{risk['severity'].upper()}: {risk['reason']}"}}]
                        }
                    }
                    for risk in analysis['risks']
                ]
            ]
        )

    def send_slack_alert(self, risks: list):
        """Send Slack notification for high-severity risks"""
        high_risks = [r for r in risks if r['severity'] == 'high']
        if not high_risks:
            return

        webhook_url = os.getenv('SLACK_WEBHOOK_URL')
        message = {
            'text': f'🚨 {len(high_risks)} High-Priority OKR Risks Detected',
            'blocks': [
                {
                    'type': 'section',
                    'text': {'type': 'mrkdwn', 'text': '*OKR Risk Alert*'}
                },
                *[
                    {
                        'type': 'section',
                        'text': {'type': 'mrkdwn', 'text': f"• {risk['reason']}\n_Action: {risk['recommended_action']}_"}
                    }
                    for risk in high_risks
                ]
            ]
        }
        requests.post(webhook_url, json=message)

    def run_daily_update(self, okr_db_id: str, dashboard_db_id: str):
        """Main function to run daily"""
        # Fetch data
        okrs = self.fetch_okrs_from_notion(okr_db_id)
        current_metrics = self.fetch_current_metrics()

        # Analyze
        analysis = self.analyze_with_ai(okrs, current_metrics)

        # Update dashboards
        self.update_notion_dashboard(dashboard_db_id, analysis)

        # Alert on risks
        self.send_slack_alert(analysis['risks'])

        return analysis


# Usage (run this daily via cron or GitHub Actions)
tracker = OKRTracker()
result = tracker.run_daily_update(
    okr_db_id='your-okr-database-id',
    dashboard_db_id='your-dashboard-database-id'
)
print(f"Updated dashboard with {len(result['risks'])} risks")
```
Level 3: Production Dashboard with LangGraph
Good for: Real-time monitoring with complex workflows | Setup time: 1 day
```python
# Production OKR Dashboard with LangGraph (Real-time monitoring)
import asyncio
import json
from typing import TypedDict, List, Optional

import anthropic
from langgraph.graph import Graph, END


class OKRState(TypedDict):
    okrs: List[dict]
    current_metrics: dict
    historical_data: List[dict]
    analysis: Optional[dict]
    risks: List[dict]
    insights: List[dict]
    actions: List[dict]
    notifications_sent: bool
    dashboard_updated: bool


class ProductionOKRSystem:
    def __init__(self):
        self.client = anthropic.Anthropic()
        self.graph = self.build_graph()

    async def fetch_data_node(self, state: OKRState) -> OKRState:
        """Fetch OKRs and current metrics from multiple sources"""
        # Parallel data fetching
        okrs, metrics, historical = await asyncio.gather(
            self.fetch_okrs_async(),
            self.fetch_current_metrics_async(),
            self.fetch_historical_data_async()
        )
        state['okrs'] = okrs
        state['current_metrics'] = metrics
        state['historical_data'] = historical
        return state

    async def analyze_progress_node(self, state: OKRState) -> OKRState:
        """Analyze current progress vs targets"""
        prompt = f"""Analyze OKR progress with historical context.

Current OKRs: {json.dumps(state['okrs'], indent=2)}
Current metrics: {json.dumps(state['current_metrics'], indent=2)}
Historical data (last 4 weeks): {json.dumps(state['historical_data'], indent=2)}

Provide JSON analysis with:
- overall_health (on_track/caution/critical)
- progress_by_objective (array with objective_id, completion_percent, trend)
- velocity_analysis (are we accelerating or decelerating?)
- forecast (will we hit targets at current pace?)
"""
        response = self.client.messages.create(
            model='claude-3-5-sonnet-20241022',
            max_tokens=3072,
            messages=[{'role': 'user', 'content': prompt}]
        )
        state['analysis'] = json.loads(response.content[0].text)
        return state

    async def identify_risks_node(self, state: OKRState) -> OKRState:
        """Identify and prioritize risks"""
        prompt = f"""Identify risks based on this analysis.

Analysis: {json.dumps(state['analysis'], indent=2)}

For each risk provide:
- kr_id
- risk_type (velocity/trajectory/external)
- severity (critical/high/medium/low)
- time_to_impact (days until this becomes a problem)
- recommended_action
- estimated_effort (hours to address)

Return as JSON array."""
        response = self.client.messages.create(
            model='claude-3-5-sonnet-20241022',
            max_tokens=2048,
            messages=[{'role': 'user', 'content': prompt}]
        )
        state['risks'] = json.loads(response.content[0].text)
        return state

    async def generate_insights_node(self, state: OKRState) -> OKRState:
        """Generate strategic insights from analysis"""
        prompt = f"""Generate strategic insights from this OKR analysis.

Analysis: {json.dumps(state['analysis'], indent=2)}
Risks: {json.dumps(state['risks'], indent=2)}

Provide insights as JSON array with:
- insight (what we learned)
- implication (what this means for strategy)
- confidence (high/medium/low)
- data_points (supporting evidence)
- suggested_pivot (should we adjust targets or tactics?)
"""
        response = self.client.messages.create(
            model='claude-3-5-sonnet-20241022',
            max_tokens=2048,
            messages=[{'role': 'user', 'content': prompt}]
        )
        state['insights'] = json.loads(response.content[0].text)
        return state

    async def generate_actions_node(self, state: OKRState) -> OKRState:
        """Generate prioritized action items"""
        prompt = f"""Generate prioritized actions based on risks and insights.

Risks: {json.dumps(state['risks'], indent=2)}
Insights: {json.dumps(state['insights'], indent=2)}

For each action provide:
- priority (1-5, 1 is highest)
- action (specific, actionable task)
- owner (recommended role/team)
- deadline (relative: 'this week', 'next 2 weeks', etc.)
- impact (what KR this affects)
- effort_estimate (hours)

Return as JSON array sorted by priority."""
        response = self.client.messages.create(
            model='claude-3-5-sonnet-20241022',
            max_tokens=2048,
            messages=[{'role': 'user', 'content': prompt}]
        )
        state['actions'] = json.loads(response.content[0].text)
        return state

    async def notify_stakeholders_node(self, state: OKRState) -> OKRState:
        """Send notifications based on severity"""
        critical_risks = [r for r in state['risks'] if r['severity'] == 'critical']

        # Send Slack alerts for critical risks
        if critical_risks:
            await self.send_slack_alert(
                channel='#executive-alerts',
                message=f"🚨 {len(critical_risks)} CRITICAL OKR risks detected",
                risks=critical_risks,
                actions=[a for a in state['actions'] if a['priority'] == 1]
            )

        # Send daily digest to strategy team
        await self.send_email_digest(
            to='strategy-team@company.com',
            subject=f"OKR Update - {state['analysis']['overall_health'].upper()}",
            analysis=state['analysis'],
            top_actions=state['actions'][:5]
        )

        state['notifications_sent'] = True
        return state

    async def update_dashboards_node(self, state: OKRState) -> OKRState:
        """Update Notion, Asana, and internal dashboards"""
        # Update Notion dashboard
        await self.update_notion_dashboard(
            analysis=state['analysis'],
            risks=state['risks'],
            insights=state['insights']
        )

        # Create Asana tasks for high-priority actions
        for action in [a for a in state['actions'] if a['priority'] <= 2]:
            await self.create_asana_task(
                name=action['action'],
                assignee=action['owner'],
                due_date=self.parse_relative_date(action['deadline']),
                description=f"Impact: {action['impact']}\nEstimated effort: {action['effort_estimate']} hours"
            )

        state['dashboard_updated'] = True
        return state

    def should_send_alerts(self, state: OKRState) -> str:
        """Determine if we need to send alerts"""
        critical_count = len([r for r in state['risks'] if r['severity'] == 'critical'])
        high_count = len([r for r in state['risks'] if r['severity'] == 'high'])

        if critical_count > 0 or high_count >= 3:
            return "send_alerts"
        return "skip_alerts"

    def build_graph(self):
        """Build the LangGraph workflow"""
        graph = Graph()

        # Add nodes
        graph.add_node("fetch_data", self.fetch_data_node)
        graph.add_node("analyze_progress", self.analyze_progress_node)
        graph.add_node("identify_risks", self.identify_risks_node)
        graph.add_node("generate_insights", self.generate_insights_node)
        graph.add_node("generate_actions", self.generate_actions_node)
        graph.add_node("notify_stakeholders", self.notify_stakeholders_node)
        graph.add_node("update_dashboards", self.update_dashboards_node)

        # Define flow
        graph.set_entry_point("fetch_data")
        graph.add_edge("fetch_data", "analyze_progress")
        graph.add_edge("analyze_progress", "identify_risks")
        graph.add_edge("identify_risks", "generate_insights")
        graph.add_edge("generate_insights", "generate_actions")

        # Conditional notification
        graph.add_conditional_edges(
            "generate_actions",
            self.should_send_alerts,
            {
                "send_alerts": "notify_stakeholders",
                "skip_alerts": "update_dashboards"
            }
        )
        graph.add_edge("notify_stakeholders", "update_dashboards")
        graph.add_edge("update_dashboards", END)

        return graph.compile()

    async def run_daily_analysis(self) -> OKRState:
        """Run the full analysis workflow"""
        initial_state: OKRState = {
            'okrs': [],
            'current_metrics': {},
            'historical_data': [],
            'analysis': None,
            'risks': [],
            'insights': [],
            'actions': [],
            'notifications_sent': False,
            'dashboard_updated': False
        }
        result = await self.graph.ainvoke(initial_state)
        return result


# Usage (deploy as scheduled job)
async def main():
    system = ProductionOKRSystem()
    result = await system.run_daily_analysis()
    print(f"Analysis complete: {result['analysis']['overall_health']}")
    print(f"Risks identified: {len(result['risks'])}")
    print(f"Actions generated: {len(result['actions'])}")
    print(f"Notifications sent: {result['notifications_sent']}")

# Run daily at 8am via cron or Kubernetes CronJob
if __name__ == '__main__':
    asyncio.run(main())
```
When to Level Up
Start: Manual Weekly Updates
1-5 objectives, weekly reviews
- Simple script to analyze OKR text + current numbers
- Manual data entry from spreadsheets
- Basic risk identification
- Email reports to team
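For the email reports, Python's standard library is enough at this level. A minimal sketch using `smtplib`; the SMTP environment variable names are assumptions, and the risk fields (`severity`, `description`) mirror the Level 1 output shape:

```python
import os
import smtplib
from email.message import EmailMessage

def email_report(summary: str, risks: list[dict], recipients: list[str]) -> None:
    """Send the weekly OKR analysis as a plain-text email report."""
    msg = EmailMessage()
    msg['Subject'] = f"Weekly OKR Report: {len(risks)} risks flagged"
    msg['From'] = os.environ['REPORT_FROM']        # assumed env vars
    msg['To'] = ', '.join(recipients)

    # Body: executive summary followed by one bullet per risk
    body = summary + '\n\nRisks:\n' + '\n'.join(
        f"- [{r['severity']}] {r['description']}" for r in risks
    )
    msg.set_content(body)

    with smtplib.SMTP(os.environ['SMTP_HOST'], 587) as smtp:
        smtp.starttls()
        smtp.login(os.environ['SMTP_USER'], os.environ['SMTP_PASS'])
        smtp.send_message(msg)
```

Plain text is a feature at this stage: it keeps the report readable in any client and trivially diffable week over week.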
Scale: Automated Daily Updates
5-15 objectives, daily monitoring
- Real-time data integration from analytics APIs
- Automated Notion/Asana updates
- Slack alerts for high-priority risks
- Historical trend analysis (4-week lookback)
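The 4-week lookback doesn't require a time-series database to start: week-over-week deltas computed from stored progress snapshots are enough to detect deceleration. A minimal sketch (the `progress_percent` snapshot list is an assumed storage format):

```python
from statistics import mean

def velocity_trend(weekly_progress: list[float]) -> dict:
    """Compare recent velocity to the prior period over a 4-week lookback.

    weekly_progress: progress_percent snapshots, oldest first,
    e.g. [52.0, 58.0, 61.0, 63.0].
    """
    deltas = [b - a for a, b in zip(weekly_progress, weekly_progress[1:])]
    recent = mean(deltas[-2:])                          # avg weekly gain, last 2 weeks
    earlier = mean(deltas[:-2]) if deltas[:-2] else recent
    return {
        'weekly_velocity': recent,
        'trend': 'accelerating' if recent > earlier else 'decelerating',
    }

print(velocity_trend([52.0, 58.0, 61.0, 63.0]))
# {'weekly_velocity': 2.5, 'trend': 'decelerating'}
```

Feeding this precomputed trend into the analysis prompt, rather than raw snapshots, also keeps token usage down.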
Production: Real-Time Dashboard
15-50 objectives, continuous monitoring
- LangGraph workflow with conditional logic
- Multi-source data aggregation (analytics, CRM, support)
- Predictive forecasting (will we hit targets? see the sketch after this list)
- Automated task creation in project management tools
- Executive dashboard with live updates
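For the forecasting item above, a linear projection answers "will we hit targets at the current pace?" well enough before you reach for anything statistical. A sketch using this article's running example numbers:

```python
def forecast_kr(current: float, target: float,
                weekly_velocity: float, weeks_remaining: int) -> dict:
    """Project a key result to quarter end at the current pace."""
    projected = current + weekly_velocity * weeks_remaining
    # Velocity needed from here on to still hit the target
    required = (target - current) / weeks_remaining if weeks_remaining else float('inf')
    return {
        'projected_final': projected,
        'will_hit_target': projected >= target,
        'required_weekly_velocity': required,
        'gap': target - projected,
    }

# Week 8 of 13: 41,500 of 50,000 active users, growing ~1,200/week
print(forecast_kr(current=41_500, target=50_000, weekly_velocity=1_200, weeks_remaining=5))
# projected_final=47500 -> will_hit_target=False; need ~1,700 users/week
```

The value of running this in code rather than in the prompt is auditability: the model interprets the gap, but the arithmetic is deterministic.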
Enterprise: Multi-Department System
50+ objectives across departments
- Department-specific agents (Product, Sales, Marketing, Engineering)
- Cross-functional dependency tracking
- Scenario modeling (what-if analysis; see the sketch after this list)
- Integration with financial planning systems
- Board-ready reporting with narrative generation
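Scenario modeling can start as a what-if pass over the same projection logic rather than a full planning system. A sketch with illustrative scenario values (the velocity lifts are placeholders, not benchmarks):

```python
def project(current: float, weekly_velocity: float, weeks_remaining: int) -> float:
    """Linear projection of a KR to quarter end."""
    return current + weekly_velocity * weeks_remaining

TARGET = 50_000  # active-user KR from the running example

scenarios = {
    'status_quo':    {'current': 41_500, 'weekly_velocity': 1_200, 'weeks_remaining': 5},
    'paid_campaign': {'current': 41_500, 'weekly_velocity': 1_800, 'weeks_remaining': 5},
    'launch_slips':  {'current': 41_500, 'weekly_velocity': 1_200, 'weeks_remaining': 3},
}

for name, params in scenarios.items():
    projected = project(**params)
    print(f"{name}: projected {projected:,.0f} (target {TARGET:,}) "
          f"-> {'hits' if projected >= TARGET else 'misses'}")
```

Run the scenarios in code, then hand the results to the model to narrate the trade-offs for the board deck.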
Strategy-Specific Gotchas
The code examples above work. But strategic planning has unique challenges you need to handle.
Data Freshness vs API Rate Limits
You want real-time updates, but analytics APIs have rate limits that vary widely by provider and endpoint; Mixpanel's query API, for instance, allows only around 60 queries per hour. Check each provider's current quota documentation, and cache strategically to avoid hitting limits.
```python
import time

import requests


class RateLimitedCache:
    def __init__(self, ttl_seconds: int, max_requests_per_minute: int):
        self.cache = {}  # key -> (value, fetch_timestamp)
        self.ttl = ttl_seconds
        self.request_times = []
        self.max_rpm = max_requests_per_minute

    def get_or_fetch(self, key: str, fetch_fn):
        # Check cache first
        if key in self.cache:
            value, timestamp = self.cache[key]
            if time.time() - timestamp < self.ttl:
                return value

        # Rate limit check: keep only requests from the last 60 seconds
        now = time.time()
        self.request_times = [t for t in self.request_times if now - t < 60]
        if len(self.request_times) >= self.max_rpm:
            # Return stale cache if available
            if key in self.cache:
                return self.cache[key][0]
            # Otherwise wait until the oldest request ages out of the window
            sleep_time = 60 - (now - self.request_times[0])
            time.sleep(sleep_time)

        # Fetch fresh data and record the actual fetch time
        value = fetch_fn()
        self.cache[key] = (value, time.time())
        self.request_times.append(time.time())
        return value


# Usage
cache = RateLimitedCache(ttl_seconds=300, max_requests_per_minute=10)

def fetch_analytics():
    return requests.get('https://api.analytics.com/v1/users').json()

# This will cache for 5 minutes and respect rate limits
data = cache.get_or_fetch('active_users', fetch_analytics)
```
Handling Lagging Indicators
Some KRs (like NPS or customer satisfaction) are lagging indicators: you won't see results for weeks. Don't treat them like real-time metrics. Track leading indicators (support tickets, feature usage) as proxies.
```typescript
interface KRConfig {
  id: string;
  type: 'leading' | 'lagging';
  update_frequency: 'realtime' | 'daily' | 'weekly' | 'monthly';
  leading_proxies?: string[]; // For lagging indicators
}

const krConfigs: KRConfig[] = [
  {
    id: 'kr_nps',
    type: 'lagging',
    update_frequency: 'monthly',
    leading_proxies: ['support_ticket_count', 'feature_usage_rate', 'churn_rate']
  },
  {
    id: 'kr_active_users',
    type: 'leading',
    update_frequency: 'realtime',
  }
];

function shouldUpdateMetric(kr: KRConfig): boolean {
  const now = new Date();

  // Lagging indicators: use proxies for frequent updates
  if (kr.type === 'lagging') {
    // Check if it's time for actual metric update
    if (kr.update_frequency === 'monthly' && now.getDate() !== 1) {
      return false; // Only update on 1st of month
    }
  }
  return true;
}

function getMetricsToUpdate(krs: KRConfig[]): string[] {
  const metricsToUpdate: string[] = [];
  for (const kr of krs) {
    if (shouldUpdateMetric(kr)) {
      metricsToUpdate.push(kr.id);
    } else if (kr.leading_proxies) {
      // For lagging indicators, update proxies instead
      metricsToUpdate.push(...kr.leading_proxies);
    }
  }
  return metricsToUpdate;
}
```
Cross-Department Dependencies
Marketing's KR depends on Engineering's feature launch. Sales' KR depends on Product's pricing change. Track dependencies explicitly and alert when blockers appear.
```python
from typing import List


class KRDependency:
    def __init__(self, kr_id: str, depends_on: List[str], block_type: str = 'hard'):
        self.kr_id = kr_id
        self.depends_on = depends_on
        self.block_type = block_type  # 'hard' or 'soft'


def check_dependencies(krs: List[dict], dependencies: List[KRDependency]) -> List[dict]:
    """Check if dependencies are blocking progress"""
    blockers = []

    for dep in dependencies:
        kr = next(k for k in krs if k['id'] == dep.kr_id)

        for dep_id in dep.depends_on:
            dep_kr = next(k for k in krs if k['id'] == dep_id)

            # Hard blocker: dependency must be complete
            if dep.block_type == 'hard' and dep_kr['status'] != 'complete':
                blockers.append({
                    'blocked_kr': dep.kr_id,
                    'blocked_by': dep_id,
                    'severity': 'critical',
                    'message': f"{kr['description']} is blocked by {dep_kr['description']}",
                    'recommendation': f"Prioritize {dep_kr['description']} to unblock"
                })
            # Soft blocker: dependency is at risk
            elif dep.block_type == 'soft' and dep_kr['status'] == 'at_risk':
                blockers.append({
                    'blocked_kr': dep.kr_id,
                    'blocked_by': dep_id,
                    'severity': 'high',
                    'message': f"{kr['description']} may be impacted by delays in {dep_kr['description']}",
                    'recommendation': f"Monitor {dep_kr['description']} closely"
                })

    return blockers


# Define dependencies
dependencies = [
    KRDependency('marketing_kr_1', ['engineering_kr_2'], 'hard'),
    KRDependency('sales_kr_1', ['product_kr_1'], 'soft'),
]

blockers = check_dependencies(okrs, dependencies)
if blockers:
    # Alert stakeholders about blockers
    send_blocker_alert(blockers)
```
Version Control for Goal Changes
Teams adjust targets mid-quarter. You need to track the original target, when it changed, why, and who approved it. Otherwise you can't tell if you're succeeding or just lowering the bar.
```typescript
interface OKRVersion {
  version: number;
  timestamp: string;
  changed_by: string;
  change_reason: string;
  target_before: number;
  target_after: number;
  approved_by?: string;
}

interface VersionedKR {
  id: string;
  description: string;
  current_target: number;
  current_value: number;
  versions: OKRVersion[];
}

function trackTargetChange(
  kr: VersionedKR,
  newTarget: number,
  changedBy: string,
  reason: string,
  approvedBy?: string
): VersionedKR {
  const version: OKRVersion = {
    version: kr.versions.length + 1,
    timestamp: new Date().toISOString(),
    changed_by: changedBy,
    change_reason: reason,
    target_before: kr.current_target,
    target_after: newTarget,
    approved_by: approvedBy
  };

  kr.versions.push(version);
  kr.current_target = newTarget;
  return kr;
}

function analyzeTargetChanges(kr: VersionedKR): {
  total_changes: number;
  direction: 'increased' | 'decreased' | 'mixed';
  net_change_percent: number;
  red_flags: string[];
} {
  const redFlags: string[] = [];

  // Flag if changed more than twice
  if (kr.versions.length > 2) {
    redFlags.push('Target changed more than twice this quarter');
  }

  // Flag if decreased without exec approval
  const decreases = kr.versions.filter(
    v => v.target_after < v.target_before && !v.approved_by
  );
  if (decreases.length > 0) {
    redFlags.push('Target decreased without executive approval');
  }

  // Calculate net change against the original target
  const original = kr.versions[0]?.target_before || kr.current_target;
  const netChange = ((kr.current_target - original) / original) * 100;

  // 'mixed' means the target moved in both directions over the quarter
  const wentUp = kr.versions.some(v => v.target_after > v.target_before);
  const wentDown = kr.versions.some(v => v.target_after < v.target_before);
  const direction = wentUp && wentDown ? 'mixed' : wentDown ? 'decreased' : 'increased';

  return {
    total_changes: kr.versions.length,
    direction,
    net_change_percent: netChange,
    red_flags: redFlags
  };
}
```
Timezone Handling for Global Teams
Your US team updates metrics at 5pm EST. Your APAC team updates at 9am SGT. Your dashboard shows 'today's progress' but it's yesterday for half the team. Normalize all timestamps to UTC and show local time in UI.
```python
from datetime import datetime, timezone
from typing import List

import pytz


class GlobalOKRTracker:
    def __init__(self):
        self.team_timezones = {
            'us_east': 'America/New_York',
            'us_west': 'America/Los_Angeles',
            'emea': 'Europe/London',
            'apac': 'Asia/Singapore'
        }

    def normalize_timestamp(self, timestamp: str, team: str) -> datetime:
        """Convert team's local time to UTC"""
        team_tz = pytz.timezone(self.team_timezones[team])
        local_time = datetime.fromisoformat(timestamp)
        local_time = team_tz.localize(local_time)
        return local_time.astimezone(timezone.utc)

    def get_current_business_day(self, team: str) -> str:
        """Get current business day in team's timezone"""
        team_tz = pytz.timezone(self.team_timezones[team])
        now_local = datetime.now(team_tz)
        return now_local.strftime('%Y-%m-%d')

    def aggregate_daily_metrics(self, metrics: List[dict]) -> dict:
        """Aggregate metrics from all teams, handling timezone differences"""
        # Group by UTC date
        by_date = {}
        for metric in metrics:
            utc_time = self.normalize_timestamp(
                metric['timestamp'],
                metric['team']
            )
            date_key = utc_time.strftime('%Y-%m-%d')
            if date_key not in by_date:
                by_date[date_key] = []
            by_date[date_key].append(metric)

        # Aggregate by date
        aggregated = {}
        for date, date_metrics in by_date.items():
            aggregated[date] = {
                'total_updates': len(date_metrics),
                'teams_reporting': len(set(m['team'] for m in date_metrics)),
                'average_progress': sum(m['progress'] for m in date_metrics) / len(date_metrics)
            }
        return aggregated


# Usage
tracker = GlobalOKRTracker()

# US team reports at 5pm Eastern
us_metric = {
    'team': 'us_east',
    'timestamp': '2025-09-23T17:00:00',
    'progress': 82
}

# APAC team reports at 9am SGT (9pm US Eastern the previous day)
apac_metric = {
    'team': 'apac',
    'timestamp': '2025-09-23T09:00:00',
    'progress': 78
}

# Both get normalized to UTC for accurate aggregation
us_utc = tracker.normalize_timestamp(us_metric['timestamp'], 'us_east')
apac_utc = tracker.normalize_timestamp(apac_metric['timestamp'], 'apac')
print(f"US report time (UTC): {us_utc}")
print(f"APAC report time (UTC): {apac_utc}")
```
Cost Calculator
Manual OKR Tracking
Limitations:
- Data is 3-7 days stale by review time
- No real-time risk detection
- Manual errors in data entry (~15%)
- Can't scale beyond 10-15 objectives
Automated OKR Dashboard
Benefits:
- Real-time data (updated daily)
- Automated risk detection within 24 hours
- Zero manual data entry
- Scales to 50+ objectives easily
- Historical trend analysis included
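One line item worth estimating before you build: model spend. A back-of-the-envelope sketch; the token counts and per-million-token prices below are placeholders to replace with your model's current pricing:

```python
def monthly_api_cost(runs_per_day: int, calls_per_run: int,
                     input_tokens_per_call: int, output_tokens_per_call: int,
                     price_in_per_m: float, price_out_per_m: float) -> float:
    """Estimate monthly model spend for the dashboard pipeline."""
    calls = runs_per_day * calls_per_run * 30
    cost_in = calls * input_tokens_per_call / 1_000_000 * price_in_per_m
    cost_out = calls * output_tokens_per_call / 1_000_000 * price_out_per_m
    return cost_in + cost_out

# Placeholder assumptions: 1 run/day, 3 chained calls, ~2k input / ~1k output
# tokens per call, $3 / $15 per million input/output tokens (verify current pricing)
print(f"~${monthly_api_cost(1, 3, 2_000, 1_000, 3.0, 15.0):.2f}/month")
```

Under those assumptions the model cost is on the order of a few dollars a month; for most teams the real cost of the automated setup is the engineering time, not the API bill.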