The database layer is where AI products either scale gracefully or collapse under their own weight. Unlike traditional applications where you're storing and retrieving structured data, AI workloads demand a fundamentally different approach—you're dealing with high-dimensional vectors, streaming conversation histories, real-time user preferences that inform model behavior, and caching layers that can make or break your cost structure.
```typescript
import { Pinecone } from '@pinecone-database/pinecone';
import { OpenAI } from 'openai';

const pinecone = new Pinecone({ apiKey: process.env.PINECONE_API_KEY! });
const openai = new OpenAI();

interface Document {
  id: string;
  content: string;
  metadata: {
    userId: string;
    createdAt: string;
    // ...remaining metadata fields truncated in the original
  };
}
```
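With the clients configured, the write path is short: embed the content, then upsert the vector with its metadata. A minimal sketch, assuming an index named `documents` and the `text-embedding-3-small` model (both are placeholder choices, not requirements):

```typescript
// Embed a document and upsert it into Pinecone. The index name and
// embedding model here are assumptions for illustration.
async function upsertDocument(doc: Document): Promise<void> {
  const { data } = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: doc.content,
  });

  await pinecone.index('documents').upsert([
    { id: doc.id, values: data[0].embedding, metadata: doc.metadata },
  ]);
}
```

Embedding whole documents rarely works well in practice, though: long content needs to be split into chunks that carry enough context to stand on their own.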
```typescript
import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';

interface ChunkWithContext {
  content: string;
  metadata: {
    documentId: string;
    documentTitle: string;
    sectionPath: string[];
    chunkIndex: number;
    totalChunks: number;
    previousChunkId: string | null;
    nextChunkId: string | null;
    // ...remaining fields truncated in the original
  };
}
```
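The point of carrying `previousChunkId` and `nextChunkId` is that retrieval can pull a chunk's neighbors to rebuild context around a hit. A sketch of how these chunks might be produced; the 1000/200 size and overlap values and the ID scheme are assumptions:

```typescript
// Hypothetical chunking pipeline built on the splitter above.
const splitter = new RecursiveCharacterTextSplitter({
  chunkSize: 1000,
  chunkOverlap: 200,
});

async function chunkDocument(
  documentId: string,
  documentTitle: string,
  text: string
): Promise<ChunkWithContext[]> {
  const pieces = await splitter.splitText(text);
  return pieces.map((content, i) => ({
    content,
    metadata: {
      documentId,
      documentTitle,
      sectionPath: [], // a real pipeline would derive this from headings
      chunkIndex: i,
      totalChunks: pieces.length,
      previousChunkId: i > 0 ? `${documentId}#${i - 1}` : null,
      nextChunkId: i < pieces.length - 1 ? `${documentId}#${i + 1}` : null,
    },
  }));
}
```

Retrieval state is only part of the picture; the conversations themselves need a durable, queryable home too.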
```typescript
// Optimized schema for AI conversation storage
interface ConversationSchema {
  // Core tables
  conversations: {
    id: string; // UUID v7 for time-ordering
    user_id: string;
    created_at: Date;
    updated_at: Date;
    metadata: {
      model: string; // 'gpt-4', 'claude-3'
      system_prompt_version: string;
      total_tokens: number;
      // ...remaining fields truncated in the original
    };
  };
  // ...remaining tables truncated in the original
}
```
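The listing truncates before the remaining tables. A plausible companion messages table, offered purely as an assumption about where the schema was headed, keeps the same UUID v7 trick so that primary-key order doubles as chronological order:

```typescript
// Inside ConversationSchema (hypothetical continuation; field names
// are assumptions, not the author's schema).
messages: {
  id: string;              // UUID v7, so PK order is also time order
  conversation_id: string; // FK to conversations.id
  role: 'system' | 'user' | 'assistant' | 'tool';
  content: string;
  token_count: number;     // cached per-message count for budget math
  created_at: Date;
};
```

Alongside conversation history, per-user preferences are the other piece of state that shapes every model call.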
```typescript
interface UserPreferences {
  // Core preferences
  communicationStyle: 'concise' | 'detailed' | 'conversational';
  expertiseLevel: 'beginner' | 'intermediate' | 'expert';
  preferredLanguage: string;
  timezone: string;

  // AI-specific preferences
  responseFormat: {
    includeExamples: boolean;
    includeCodeSnippets: boolean;
    maxResponseLength: 'short' | 'medium' | 'long';
    // ...remaining fields truncated in the original
  };
}
```
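Preferences only matter if they reach the model. One way to wire them in, sketched under the assumption that they are rendered into a system-prompt fragment at request time:

```typescript
// Hypothetical: fold stored preferences into the system prompt.
function preferencesToPrompt(prefs: UserPreferences): string {
  const lines = [
    `Respond in a ${prefs.communicationStyle} style.`,
    `Assume the user is at a ${prefs.expertiseLevel} level.`,
    `Reply in ${prefs.preferredLanguage}.`,
  ];
  if (prefs.responseFormat.includeCodeSnippets) {
    lines.push('Include code snippets where relevant.');
  }
  return lines.join('\n');
}
```

With the write side covered, the read path deserves the same attention, starting with vector search.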
```typescript
import { Pinecone } from '@pinecone-database/pinecone';
import { Redis } from 'ioredis';
import { createHash } from 'crypto';

class VectorSearchService {
  private pinecone: Pinecone;
  private redis: Redis;
  private index: ReturnType<Pinecone['index']>;

  constructor() {
    this.pinecone = new Pinecone({ apiKey: process.env.PINECONE_API_KEY! });
    this.redis = new Redis(process.env.REDIS_URL!);
    // Assumed completion of the truncated constructor: bind the index once.
    this.index = this.pinecone.index(process.env.PINECONE_INDEX!);
  }

  // ...search methods truncated in the original
}
```
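The imports tell you where this is going: `createHash` plus Redis means identical queries should never hit Pinecone twice. A hypothetical sketch of the search method the listing cuts off, written as it would sit inside `VectorSearchService`; the key scheme and the 5-minute TTL are assumptions:

```typescript
// Hypothetical continuation of VectorSearchService.
async search(embedding: number[], topK = 10) {
  // Deterministic cache key derived from the query itself.
  const key =
    'vsearch:' +
    createHash('sha256').update(JSON.stringify({ embedding, topK })).digest('hex');

  const cached = await this.redis.get(key);
  if (cached) return JSON.parse(cached); // cache hit: skip Pinecone entirely

  const result = await this.index.query({
    vector: embedding,
    topK,
    includeMetadata: true,
  });

  await this.redis.set(key, JSON.stringify(result.matches), 'EX', 300); // 5 min TTL
  return result.matches;
}
```

Note that hashing the raw embedding only catches byte-identical queries; semantic caching, where near-duplicate queries share an entry, is a different and harder problem. The same idea extends beyond search: full model responses are even more expensive to regenerate, which is what the next layer addresses.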
```typescript
import { Redis } from 'ioredis';
import { LRUCache } from 'lru-cache';

// Minimal stand-ins for types the excerpt references but never defines
// (the original definitions are truncated).
interface CachedResponse { response: string; tokens: number; }
interface CacheMetrics { hits: number; misses: number; }

class AIResponseCache {
  private l1Cache: LRUCache<string, CachedResponse>;
  private l2Cache: Redis;
  private metrics: CacheMetrics = { hits: 0, misses: 0 };

  constructor() {
    // L1: in-memory, 1000 items, 5 min TTL
    this.l1Cache = new LRUCache<string, CachedResponse>({
      max: 1000,
      ttl: 1000 * 60 * 5,
    });
    // L2: shared Redis (assumed completion of the truncated constructor)
    this.l2Cache = new Redis(process.env.REDIS_URL!);
  }
}
```
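A plausible read path for this cache, again a sketch of the truncated methods rather than the original code: check L1, fall back to L2, and promote L2 hits so the next read stays in memory.

```typescript
// Hypothetical continuation of AIResponseCache.
async get(key: string): Promise<CachedResponse | null> {
  const l1 = this.l1Cache.get(key);
  if (l1) {
    this.metrics.hits++;
    return l1;
  }

  const l2 = await this.l2Cache.get(key);
  if (l2) {
    const parsed: CachedResponse = JSON.parse(l2);
    this.l1Cache.set(key, parsed); // promote, so the next read skips Redis
    this.metrics.hits++;
    return parsed;
  }

  this.metrics.misses++;
  return null;
}
```

Finally, none of these choices are permanent. Vendors change pricing, workloads outgrow indexes, and the migration path matters as much as the original pick.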
```typescript
// Migration service for seamless database transitions
class DatabaseMigrationService {
  private sourceDb: PostgresClient;
  private targetVectorStore: PineconeClient;
  private redis: Redis;

  async migrateVectorsIncrementally(options: MigrationOptions) {
    const batchSize = options.batchSize || 1000;
    let lastId = options.resumeFrom || 0;
    let migrated = 0;

    while (true) {
      // ...loop body truncated in the original
    }
  }
}
```
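The listing cuts off at the top of the loop, but the setup (a batch size, a resumable cursor, a running count) implies the standard incremental pattern: read a batch past the cursor, upsert it, checkpoint progress, and stop when the source runs dry. A sketch of that body, with the source query, column names, and client methods all assumed:

```typescript
// Hypothetical body of the while (true) loop above; none of the
// table or method names come from the original.
const rows = await this.sourceDb.query(
  'SELECT id, embedding, metadata FROM embeddings WHERE id > $1 ORDER BY id LIMIT $2',
  [lastId, batchSize]
);
if (rows.length === 0) break; // source exhausted: migration complete

await this.targetVectorStore.upsert(
  rows.map((r) => ({ id: String(r.id), values: r.embedding, metadata: r.metadata }))
);

lastId = rows[rows.length - 1].id;
migrated += rows.length;

// Checkpoint after every batch so a crash resumes from lastId
// instead of starting over.
await this.redis.set('migration:last_id', String(lastId));
```

Checkpointing after each batch is what makes the migration resumable, and because vector upserts are idempotent, re-running a batch after a crash is harmless.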