
Romance Writing Prompts Generator - Complete Implementation Example

Overview

The Romance Writing Prompts Generator serves as the reference implementation for the Story Prompt Architecture pattern. This tool targets the “romance writing prompts” keyword (28 impressions) and demonstrates all framework components including AI generation, admin management, response system, and cross-tool integration.

Tool Specifications:

  • URL: /romance-writing-prompts
  • Target Keywords: “romance writing prompts”, “romantic story ideas”, “love story prompts”
  • Architecture Pattern: Story Prompt Architecture
  • User Flow: Generate prompts → Write responses → Share in community (see the sketch after this list)
  • Admin Features: Bulk generation, quality review, featured collections
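
The user flow above maps onto two of the API routes documented later in this guide. A minimal client-side sketch of that flow (request bodies are abbreviated and the session handling is illustrative only):

// Sketch: the generate → respond → share flow as calls against the routes documented below.
async function exampleUserFlow(sessionId: string) {
  // 1. Generate prompts
  const generateRes = await fetch('/api/romance-prompts/generate', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-Session-ID': sessionId },
    body: JSON.stringify({
      count: 3,
      romanceSubgenre: 'contemporary',
      heatLevel: 'sweet',
      settingEra: 'modern',
      characterArchetypes: ['strong_heroine'],
      relationshipDynamic: 'friends_to_lovers',
      tropes: ['slow_burn']
    })
  })
  const { data } = await generateRes.json()
  const promptId = data.prompts[0].id

  // 2. Write a response; isPublic: true is what shares it with the community
  await fetch(`/api/romance-prompts/${promptId}/responses`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json', 'X-Session-ID': sessionId },
    body: JSON.stringify({
      title: 'Coffee Shop Confession',
      content: 'She never meant to read his notebook...',
      isPublic: true
    })
  })
}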

Database Schema

1. Main Prompts Table

-- Romance Writing Prompts Migration
-- File: supabase/migrations/20250125_180000_create_romance_prompts_tables.sql

CREATE TABLE IF NOT EXISTS tools_romance_prompts (
  -- Standard tool fields
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  title TEXT NOT NULL,
  share_code TEXT UNIQUE NOT NULL,
  user_id UUID REFERENCES auth.users(id) ON DELETE CASCADE,
  session_id TEXT NOT NULL,

  -- Visibility & management
  is_public BOOLEAN DEFAULT FALSE,
  is_featured BOOLEAN DEFAULT FALSE,
  is_reviewed BOOLEAN DEFAULT FALSE,

  -- Analytics
  view_count INTEGER DEFAULT 0,
  share_count INTEGER DEFAULT 0,
  export_count INTEGER DEFAULT 0,
  use_count INTEGER DEFAULT 0, -- How many responses written

  -- SEO optimization
  seo_title TEXT,
  seo_description TEXT,
  keywords TEXT[],

  -- AI integration
  is_ai_generated BOOLEAN DEFAULT TRUE,
  ai_generation_prompt TEXT,
  ai_confidence DECIMAL(3,2) DEFAULT 0.8,
  ai_tokens_used INTEGER,
  ai_cost_usd DECIMAL(10,4),
  ai_model TEXT,
  generation_time_ms INTEGER,

  -- Romance-specific fields
  prompt_text TEXT NOT NULL,
  romance_subgenre VARCHAR(50),     -- contemporary, historical, paranormal, etc.
  heat_level VARCHAR(20),           -- sweet, steamy, erotic
  setting_era VARCHAR(50),          -- modern, regency, medieval, future
  character_archetypes TEXT[],      -- alpha_hero, shy_heroine, enemies_to_lovers
  relationship_dynamic VARCHAR(50), -- enemies_to_lovers, second_chance, fake_dating
  tropes TEXT[],                    -- instalove, forbidden_love, workplace_romance
  conflict_type VARCHAR(50),        -- internal, external, both
  emotional_beats TEXT[],           -- meet_cute, first_kiss, black_moment, hea
  word_count_range VARCHAR(50),     -- 500-1000, 1000-2500, etc.
  time_estimate VARCHAR(50),        -- 30 mins, 1 hour, 2 hours

  -- Content structure (JSONB for flexibility)
  content JSONB NOT NULL DEFAULT '{}'::jsonb,
  generation_options JSONB DEFAULT '{}'::jsonb,
  metadata JSONB DEFAULT '{}'::jsonb,

  -- Timestamps
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW(),
  expires_at TIMESTAMPTZ -- for anonymous content
);

-- Performance indexes
CREATE INDEX IF NOT EXISTS idx_romance_prompts_subgenre ON tools_romance_prompts(romance_subgenre);
CREATE INDEX IF NOT EXISTS idx_romance_prompts_heat_level ON tools_romance_prompts(heat_level);
CREATE INDEX IF NOT EXISTS idx_romance_prompts_tropes ON tools_romance_prompts USING GIN(tropes);
CREATE INDEX IF NOT EXISTS idx_romance_prompts_featured ON tools_romance_prompts(is_featured) WHERE is_featured = true;
CREATE INDEX IF NOT EXISTS idx_romance_prompts_public ON tools_romance_prompts(is_public, created_at) WHERE is_public = true;
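
Two application-side sketches against this table may help orient the rest of the guide. The interface mirrors the content JSONB block produced by the AI service further down, and the query is the shape the idx_romance_prompts_public partial index is built for; both are illustrative rather than framework code.

import { getSupabaseTools } from '@/lib/supabase'

// Illustrative shape of the content JSONB column (matches RomancePrompt.content below)
interface RomancePromptContent {
  hookLine?: string
  characterSetup?: string
  conflictSeed?: string
  emotionalStakes?: string
  settingDetails?: string
  writingTips?: string[]
}

// Fetch the latest public prompts; the is_public/created_at partial index covers this query
async function getLatestPublicPrompts(limit = 20) {
  const supabase = getSupabaseTools()
  const { data, error } = await supabase
    .from('tools_romance_prompts')
    .select('id, title, prompt_text, romance_subgenre, heat_level, tropes, content')
    .eq('is_public', true)
    .order('created_at', { ascending: false })
    .limit(limit)
  if (error) throw error
  return data // each row's content field conforms to RomancePromptContent
}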

2. User Responses Table

CREATE TABLE IF NOT EXISTS tools_romance_prompt_responses (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  prompt_id UUID REFERENCES tools_romance_prompts(id) ON DELETE CASCADE,
  user_id UUID REFERENCES auth.users(id) ON DELETE CASCADE,
  session_id TEXT NOT NULL,

  -- Content
  title TEXT NOT NULL,
  content TEXT NOT NULL,
  word_count INTEGER DEFAULT 0,
  excerpt TEXT, -- First 200 characters for previews

  -- Romance-specific response data
  story_completion_level VARCHAR(20), -- beginning, middle, complete
  heat_level_written VARCHAR(20),     -- what user actually wrote
  tags TEXT[],                        -- user-defined tags

  -- Visibility & engagement
  is_public BOOLEAN DEFAULT FALSE,
  is_featured BOOLEAN DEFAULT FALSE,
  view_count INTEGER DEFAULT 0,
  like_count INTEGER DEFAULT 0,
  share_count INTEGER DEFAULT 0,

  -- Quality metrics
  quality_score DECIMAL(3,2), -- AI-generated quality score
  readability_score INTEGER,  -- Flesch reading score

  -- SEO (for public responses)
  seo_title TEXT,
  seo_description TEXT,
  keywords TEXT[],

  -- Timestamps
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW(),
  writing_time_minutes INTEGER, -- How long user spent writing
  last_accessed_at TIMESTAMPTZ
);

-- Indexes for responses
CREATE INDEX IF NOT EXISTS idx_romance_responses_prompt_id ON tools_romance_prompt_responses(prompt_id);
CREATE INDEX IF NOT EXISTS idx_romance_responses_public ON tools_romance_prompt_responses(is_public, created_at) WHERE is_public = true;
CREATE INDEX IF NOT EXISTS idx_romance_responses_featured ON tools_romance_prompt_responses(is_featured) WHERE is_featured = true;

3. Collections Table

CREATE TABLE IF NOT EXISTS tools_romance_prompt_collections (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  title TEXT NOT NULL,
  description TEXT,
  slug TEXT UNIQUE NOT NULL,
  cover_image_url TEXT,

  -- Romance collection specifics
  romance_subgenre VARCHAR(50),
  theme VARCHAR(100),           -- Valentine's Day, Wedding Season, etc.
  difficulty_level VARCHAR(20), -- beginner, intermediate, advanced
  estimated_time VARCHAR(50),   -- 1 week challenge, 30-day challenge

  -- Management
  is_featured BOOLEAN DEFAULT FALSE,
  is_published BOOLEAN DEFAULT TRUE,
  prompt_count INTEGER DEFAULT 0,
  total_responses INTEGER DEFAULT 0,

  -- SEO
  seo_title TEXT,
  seo_description TEXT,
  keywords TEXT[],

  -- Timestamps
  created_at TIMESTAMPTZ DEFAULT NOW(),
  updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- Junction table for prompts in collections
CREATE TABLE IF NOT EXISTS tools_romance_collection_prompts (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  collection_id UUID REFERENCES tools_romance_prompt_collections(id) ON DELETE CASCADE,
  prompt_id UUID REFERENCES tools_romance_prompts(id) ON DELETE CASCADE,
  sort_order INTEGER DEFAULT 0,
  day_number INTEGER, -- for daily challenge collections
  UNIQUE(collection_id, prompt_id)
);
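
Neither the collections table nor the junction table gets a dedicated public endpoint in this example, so here is a hedged sketch of how a themed collection might be read, assuming the foreign keys above are exposed so that supabase-js embedded selects work:

import { getSupabaseTools } from '@/lib/supabase'

// Fetch a published collection and its prompts through the junction table.
async function getCollectionWithPrompts(slug: string) {
  const supabase = getSupabaseTools()
  const { data, error } = await supabase
    .from('tools_romance_prompt_collections')
    .select(`
      id, title, description, theme, difficulty_level,
      tools_romance_collection_prompts (
        sort_order,
        day_number,
        tools_romance_prompts ( id, title, prompt_text, heat_level, tropes )
      )
    `)
    .eq('slug', slug)
    .eq('is_published', true)
    .single()
  if (error) throw error

  // Daily-challenge collections order by day_number, everything else by sort_order
  data.tools_romance_collection_prompts.sort(
    (a: any, b: any) => (a.day_number ?? a.sort_order) - (b.day_number ?? b.sort_order)
  )
  return data
}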

4. Analytics Table

CREATE TABLE IF NOT EXISTS tools_romance_prompt_analytics (
  id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
  prompt_id UUID REFERENCES tools_romance_prompts(id) ON DELETE CASCADE,
  event_type VARCHAR(50) NOT NULL, -- generation, view, response, share, export
  event_data JSONB,
  user_id UUID REFERENCES auth.users(id),
  session_id TEXT,
  ip_address TEXT,
  user_agent TEXT,
  referrer TEXT,
  created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_romance_analytics_prompt_id ON tools_romance_prompt_analytics(prompt_id);
CREATE INDEX IF NOT EXISTS idx_romance_analytics_event_type ON tools_romance_prompt_analytics(event_type);
CREATE INDEX IF NOT EXISTS idx_romance_analytics_created_at ON tools_romance_prompt_analytics(created_at);
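
Events in this table are written by the API routes below; reading them back for dashboards is straightforward. A hypothetical helper (not part of the framework code) that counts recent views for a prompt, using the event_type and created_at indexes:

import { getSupabaseTools } from '@/lib/supabase'

// Count how many times a prompt was viewed in the last 7 days.
async function countRecentViews(promptId: string): Promise<number> {
  const supabase = getSupabaseTools()
  const since = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString()
  const { count, error } = await supabase
    .from('tools_romance_prompt_analytics')
    .select('id', { count: 'exact', head: true })
    .eq('prompt_id', promptId)
    .eq('event_type', 'view')
    .gte('created_at', since)
  if (error) throw error
  return count ?? 0
}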

AI Service Implementation

1. Romance Prompts AI Class

// File: src/lib/romance-prompts-ai.ts import OpenAI from 'openai' import { trackAIUsage, estimateTokenCount, estimateTokenCost, getRecommendedToolsModel } from './ai-usage-tracker' export interface RomancePromptOptions { count: number romanceSubgenre: string heatLevel: string settingEra: string characterArchetypes: string[] relationshipDynamic: string tropes: string[] conflictType: string wordCountRange: string timeEstimate: string customTheme?: string } export interface RomancePrompt { id?: string promptText: string romanceSubgenre: string heatLevel: string settingEra: string characterArchetypes: string[] relationshipDynamic: string tropes: string[] conflictType: string emotionalBeats: string[] wordCountRange: string timeEstimate: string // SEO fields seoTitle: string seoDescription: string keywords: string[] // AI metadata aiConfidence: number aiGenerationPrompt: string // Content structure content: { hookLine: string characterSetup: string conflictSeed: string emotionalStakes: string settingDetails: string writingTips: string[] } } export class RomancePromptsAI { private openai: OpenAI constructor() { this.openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY, }) } async generateRomancePrompts( options: RomancePromptOptions, userId?: string, sessionId?: string ): Promise<RomancePrompt[]> { const startTime = Date.now() let totalTokens = 0 let success = false try { const systemPrompt = this.buildSystemPrompt(options) const userPrompt = this.buildUserPrompt(options) const response = await this.openai.chat.completions.create({ model: getRecommendedToolsModel(), messages: [ { role: 'system', content: systemPrompt }, { role: 'user', content: userPrompt } ], temperature: 0.8, max_tokens: 4000, response_format: { type: 'json_object' } }) totalTokens = response.usage?.total_tokens || estimateTokenCount(systemPrompt + userPrompt) const cost = estimateTokenCost(getRecommendedToolsModel(), totalTokens) const generationTime = Date.now() - startTime const prompts = this.parseRomancePrompts(response.choices[0].message.content!, options) success = true // REQUIRED: Track AI usage for admin monitoring await trackAIUsage({ userId, sessionId, featureName: 'romance_prompts_generation', promptName: 'romance_prompt_creation', modelUsed: getRecommendedToolsModel(), provider: 'openai', tokensConsumed: totalTokens, costUsd: cost, responseTimeMs: generationTime, requestParams: options, requestSuccess: true, generatedPrompts: prompts.map(p => ({ promptText: p.promptText, subgenre: p.romanceSubgenre, tropes: p.tropes, confidence: p.aiConfidence })) }) return prompts } catch (error) { const cost = estimateTokenCost(getRecommendedToolsModel(), totalTokens) const generationTime = Date.now() - startTime // Track AI failures await trackAIUsage({ userId, sessionId, featureName: 'romance_prompts_generation', promptName: 'romance_prompt_creation', modelUsed: getRecommendedToolsModel(), provider: 'openai', tokensConsumed: totalTokens, costUsd: cost, responseTimeMs: generationTime, requestParams: options, requestSuccess: false, errorMessage: error instanceof Error ? error.message : 'Unknown error' }) throw new Error('Failed to generate romance prompts. Please try again.') } } private buildSystemPrompt(options: RomancePromptOptions): string { return `You are an expert romance writing prompt generator specializing in ${options.romanceSubgenre} romance. Your task is to create compelling, emotionally engaging writing prompts that: 1. Incorporate the requested tropes and dynamics naturally 2. 
Provide clear character motivation and conflict 3. Set up emotional stakes that readers care about 4. Include specific setting details that enhance the romance 5. Offer concrete writing direction without being prescriptive Generate exactly ${options.count} prompts that vary in approach but maintain consistent quality. Romance Expertise Guidelines: - ${options.heatLevel} heat level means: ${this.getHeatLevelGuidance(options.heatLevel)} - ${options.settingEra} setting requires: ${this.getSettingGuidance(options.settingEra)} - Tropes to incorporate: ${options.tropes.join(', ')} - Relationship dynamic: ${options.relationshipDynamic} - Target conflict type: ${options.conflictType} Return a JSON object with a "prompts" array. Each prompt should include: - promptText: The main writing prompt (engaging and specific) - hookLine: An attention-grabbing opening line suggestion - characterSetup: Brief character background that creates chemistry - conflictSeed: The central tension that drives the story - emotionalStakes: What the characters stand to gain/lose - settingDetails: Specific details about time, place, atmosphere - writingTips: 3-4 craft tips specific to this prompt - seoTitle: SEO-optimized title for the prompt - seoDescription: Meta description for search engines - keywords: 5-7 relevant search keywords` } private buildUserPrompt(options: RomancePromptOptions): string { return `Create ${options.count} romance writing prompts with these specifications: Romance Subgenre: ${options.romanceSubgenre} Heat Level: ${options.heatLevel} Setting Era: ${options.settingEra} Character Archetypes: ${options.characterArchetypes.join(', ')} Relationship Dynamic: ${options.relationshipDynamic} Tropes: ${options.tropes.join(', ')} Conflict Type: ${options.conflictType} Target Length: ${options.wordCountRange} words Time Estimate: ${options.timeEstimate} ${options.customTheme ? `Special Theme: ${options.customTheme}` : ''} Ensure each prompt: 1. Is unique and creative while fitting the specifications 2. Has clear romantic tension and emotional stakes 3. Provides enough detail for writers to start immediately 4. Includes era-appropriate language and situations 5. Balances familiar tropes with fresh twists 6. 
Offers multiple directions the story could develop Focus on prompts that would perform well for the keywords: "romance writing prompts", "romantic story ideas", "${options.romanceSubgenre} romance prompts"` } private parseRomancePrompts(response: string, options: RomancePromptOptions): RomancePrompt[] { try { const parsed = JSON.parse(response) const prompts = parsed.prompts || [] return prompts.map((prompt: any, index: number) => ({ id: `${Date.now()}-${index}`, promptText: prompt.promptText, romanceSubgenre: options.romanceSubgenre, heatLevel: options.heatLevel, settingEra: options.settingEra, characterArchetypes: options.characterArchetypes, relationshipDynamic: options.relationshipDynamic, tropes: options.tropes, conflictType: options.conflictType, emotionalBeats: this.extractEmotionalBeats(prompt.promptText), wordCountRange: options.wordCountRange, timeEstimate: options.timeEstimate, seoTitle: prompt.seoTitle || `${options.romanceSubgenre} Romance Writing Prompt`, seoDescription: prompt.seoDescription || prompt.promptText.substring(0, 155), keywords: prompt.keywords || this.generateKeywords(options), aiConfidence: this.calculateConfidence(prompt), aiGenerationPrompt: 'romance_prompt_generation', content: { hookLine: prompt.hookLine || '', characterSetup: prompt.characterSetup || '', conflictSeed: prompt.conflictSeed || '', emotionalStakes: prompt.emotionalStakes || '', settingDetails: prompt.settingDetails || '', writingTips: prompt.writingTips || [] } })) } catch (error) { throw new Error('Failed to parse AI response') } } private getHeatLevelGuidance(heatLevel: string): string { const guidance = { sweet: 'Focus on emotional intimacy, hand-holding, sweet kisses. Fade to black for physical intimacy.', steamy: 'Include passionate kissing, sexual tension, some physical intimacy with tasteful descriptions.', erotic: 'Explicit physical intimacy is appropriate and expected. Focus on desire and sensuality.' 
} return guidance[heatLevel as keyof typeof guidance] || guidance.sweet } private getSettingGuidance(era: string): string { const guidance = { contemporary: 'Modern technology, current social norms, contemporary careers and lifestyles', historical: 'Period-appropriate customs, language, and social constraints that create romantic tension', regency: 'Strict social rules, chaperoned meetings, marriage mart dynamics, propriety constraints', western: 'Frontier settings, independent characters, survival themes, small town dynamics', paranormal: 'Supernatural elements that enhance rather than overshadow the romance', fantasy: 'Magical systems that create unique romantic obstacles and opportunities' } return guidance[era as keyof typeof guidance] || 'Authentic period details that enhance the romantic tension' } private extractEmotionalBeats(promptText: string): string[] { // Simple extraction of emotional moments from the prompt const beats = [] if (promptText.includes('meet') || promptText.includes('first')) beats.push('meet_cute') if (promptText.includes('conflict') || promptText.includes('misunderstand')) beats.push('black_moment') if (promptText.includes('realize') || promptText.includes('discover')) beats.push('revelation') if (promptText.includes('together') || promptText.includes('forever')) beats.push('hea') return beats } private calculateConfidence(prompt: any): number { let confidence = 0.7 // Base confidence // Boost confidence based on completeness if (prompt.hookLine) confidence += 0.05 if (prompt.characterSetup) confidence += 0.05 if (prompt.conflictSeed) confidence += 0.05 if (prompt.writingTips && prompt.writingTips.length > 0) confidence += 0.05 if (prompt.promptText.length > 200) confidence += 0.05 return Math.min(confidence, 0.95) // Cap at 95% } private generateKeywords(options: RomancePromptOptions): string[] { return [ 'romance writing prompts', `${options.romanceSubgenre} romance`, 'romantic story ideas', 'love story prompts', `${options.heatLevel} romance`, options.relationshipDynamic.replace('_', ' '), ...options.tropes.slice(0, 2) ] } }
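
A brief usage sketch of the class above, for example from a one-off script or background job. The option values reuse the enums that appear throughout this document; the session id is illustrative.

import { RomancePromptsAI } from '@/lib/romance-prompts-ai'

// Generate three sweet contemporary prompts outside of an API route.
async function demo() {
  const ai = new RomancePromptsAI()
  const prompts = await ai.generateRomancePrompts(
    {
      count: 3,
      romanceSubgenre: 'contemporary',
      heatLevel: 'sweet',
      settingEra: 'modern',
      characterArchetypes: ['shy_heroine', 'wounded_hero'],
      relationshipDynamic: 'second_chance',
      tropes: ['slow_burn', 'forced_proximity'],
      conflictType: 'both',
      wordCountRange: '1000-2500',
      timeEstimate: '1-2 hours'
    },
    undefined,          // userId (anonymous here)
    'script-session-1'  // sessionId recorded by trackAIUsage
  )
  prompts.forEach(p => console.log(p.seoTitle, '-', p.promptText.slice(0, 80)))
}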

API Implementation

1. Generation Endpoint

// File: src/app/api/romance-prompts/generate/route.ts import { NextRequest, NextResponse } from 'next/server' import { z } from 'zod' import { RomancePromptsAI } from '@/lib/romance-prompts-ai' import { checkRateLimit, getRateLimitHeaders } from '@/lib/rate-limiter' import { SessionManager } from '@/lib/session' import { getSupabaseTools } from '@/lib/supabase' import { UniversalShareCodeManager } from '@/lib/universal-share-code' import { trackAnalytics } from '@/lib/analytics' // Add romance prompts to rate limits const RATE_LIMITS = { romance_prompt_generation: { feature: 'romance_prompt_generation', maxRequests: 15, // Higher limit for romance prompts due to popularity windowMinutes: 24 * 60 } } // Validation schema const generateRomancePromptsSchema = z.object({ count: z.number().min(1).max(10).default(5), romanceSubgenre: z.enum(['contemporary', 'historical', 'paranormal', 'fantasy', 'western', 'regency']), heatLevel: z.enum(['sweet', 'steamy', 'erotic']).default('steamy'), settingEra: z.string().min(1), characterArchetypes: z.array(z.string()).min(1), relationshipDynamic: z.enum(['enemies_to_lovers', 'friends_to_lovers', 'second_chance', 'fake_dating', 'forced_proximity', 'workplace_romance']), tropes: z.array(z.string()).min(1), conflictType: z.enum(['internal', 'external', 'both']).default('both'), wordCountRange: z.string().default('1000-2500'), timeEstimate: z.string().default('1-2 hours'), customTheme: z.string().optional(), title: z.string().max(200).optional(), isPublic: z.boolean().default(false) }) export async function POST(request: NextRequest) { try { // Get session and user info const sessionId = SessionManager.getSessionId(request) const userId = request.headers.get('x-user-id') || undefined // Parse and validate request const body = await request.json() const validatedData = generateRomancePromptsSchema.parse(body) // Check rate limits const rateLimitResult = await checkRateLimit( request, 'romance_prompt_generation', userId ) if (!rateLimitResult.allowed) { return NextResponse.json( { success: false, error: 'Rate limit exceeded. 
Please try again later.', meta: { remainingLimit: rateLimitResult.remaining, resetTime: rateLimitResult.resetAt.toISOString() } }, { status: 429, headers: getRateLimitHeaders(rateLimitResult) } ) } // Generate prompts using AI const romanceAI = new RomancePromptsAI() const startTime = Date.now() const prompts = await romanceAI.generateRomancePrompts( validatedData, userId, sessionId ) const generationTime = Date.now() - startTime // Save prompts to database const supabase = getSupabaseTools() const savedPrompts = [] for (const prompt of prompts) { try { // Generate share code const shareCode = await UniversalShareCodeManager.generateRomancePromptShareCode( prompt.romanceSubgenre, prompt.seoTitle ) const { data, error } = await supabase .from('tools_romance_prompts') .insert({ title: validatedData.title || `${prompt.romanceSubgenre} Romance Prompt`, share_code: shareCode, user_id: userId, session_id: sessionId, is_public: validatedData.isPublic, // Romance-specific fields prompt_text: prompt.promptText, romance_subgenre: prompt.romanceSubgenre, heat_level: prompt.heatLevel, setting_era: prompt.settingEra, character_archetypes: prompt.characterArchetypes, relationship_dynamic: prompt.relationshipDynamic, tropes: prompt.tropes, conflict_type: prompt.conflictType, emotional_beats: prompt.emotionalBeats, word_count_range: prompt.wordCountRange, time_estimate: prompt.timeEstimate, // Content structure content: prompt.content, generation_options: validatedData, // SEO fields seo_title: prompt.seoTitle, seo_description: prompt.seoDescription, keywords: prompt.keywords, // AI metadata is_ai_generated: true, ai_generation_prompt: prompt.aiGenerationPrompt, ai_confidence: prompt.aiConfidence, generation_time_ms: generationTime, // Anonymous content expires in 24 hours expires_at: userId ? null : new Date(Date.now() + 24 * 60 * 60 * 1000).toISOString() }) .select() .single() if (error) { console.error('Database save error:', error) continue // Continue with other prompts } savedPrompts.push({ ...prompt, id: data.id, shareCode: data.share_code, createdAt: data.created_at, updatedAt: data.updated_at }) } catch (promptError) { console.error('Error saving prompt:', promptError) continue } } // Track analytics await trackAnalytics('romance_prompt_generated', { tool: 'romance_prompts', sessionId, userId, subgenre: validatedData.romanceSubgenre, heatLevel: validatedData.heatLevel, promptCount: savedPrompts.length, generationTime }) // Log to analytics table for (const prompt of savedPrompts) { await supabase .from('tools_romance_prompt_analytics') .insert({ prompt_id: prompt.id, event_type: 'generation', event_data: { subgenre: prompt.romanceSubgenre, tropes: prompt.tropes, generationTime }, user_id: userId, session_id: sessionId, ip_address: request.headers.get('x-forwarded-for') || 'unknown', user_agent: request.headers.get('user-agent') || 'unknown' }) } return NextResponse.json({ success: true, data: { prompts: savedPrompts, remainingLimit: rateLimitResult.remaining, generationTime }, meta: { remainingLimit: rateLimitResult.remaining, processingTime: generationTime, version: '1.0' } }, { headers: getRateLimitHeaders(rateLimitResult) }) } catch (error) { console.error('Romance prompt generation error:', error) if (error instanceof z.ZodError) { return NextResponse.json( { success: false, error: 'Invalid request data', details: error.errors }, { status: 400 } ) } return NextResponse.json( { success: false, error: 'Failed to generate romance prompts. Please try again.' 
}, { status: 500 } ) } } // CORS handler export async function OPTIONS(request: NextRequest) { return new Response(null, { status: 200, headers: { 'Access-Control-Allow-Origin': process.env.WEB_APP_URL || 'https://mystoryflow.com', 'Access-Control-Allow-Methods': 'POST, OPTIONS', 'Access-Control-Allow-Headers': 'Content-Type, Authorization, X-Session-ID, X-User-ID', 'Access-Control-Allow-Credentials': 'true', 'Access-Control-Max-Age': '86400' } }) }

2. Response System Endpoints

// File: src/app/api/romance-prompts/[id]/responses/route.ts import { NextRequest, NextResponse } from 'next/server' import { getSupabaseTools } from '@/lib/supabase' import { validateApiContent, sanitizeContent } from '@/lib/content-validator' import { checkRateLimit, getRateLimitHeaders } from '@/lib/rate-limiter' interface Context { params: Promise<{ id: string }> } // GET - View responses for a romance prompt export async function GET(request: NextRequest, context: Context) { try { const params = await context.params const { id: promptId } = params const searchParams = request.nextUrl.searchParams const page = parseInt(searchParams.get('page') || '1') const limit = Math.min(parseInt(searchParams.get('limit') || '20'), 50) const offset = (page - 1) * limit const sortBy = searchParams.get('sort') || 'recent' // recent, popular, featured const supabase = getSupabaseTools() // Build query based on sort preference let query = supabase .from('tools_romance_prompt_responses') .select(` id, title, content, excerpt, word_count, view_count, like_count, story_completion_level, heat_level_written, tags, is_featured, created_at, updated_at `, { count: 'exact' }) .eq('prompt_id', promptId) .eq('is_public', true) // Apply sorting switch (sortBy) { case 'popular': query = query.order('like_count', { ascending: false }) break case 'featured': query = query.order('is_featured', { ascending: false }) .order('created_at', { ascending: false }) break default: query = query.order('created_at', { ascending: false }) } const { data: responses, error, count } = await query .range(offset, offset + limit - 1) if (error) throw error // Transform responses const transformedResponses = (responses || []).map(response => ({ id: response.id, title: response.title, excerpt: response.excerpt || response.content.substring(0, 200) + '...', wordCount: response.word_count, viewCount: response.view_count, likeCount: response.like_count, completionLevel: response.story_completion_level, heatLevel: response.heat_level_written, tags: response.tags || [], isFeatured: response.is_featured, createdAt: response.created_at, authorName: 'Anonymous' // Privacy protection })) return NextResponse.json({ success: true, data: transformedResponses, meta: { page, limit, total: count || 0, hasMore: (count || 0) > offset + limit, sortBy } }) } catch (error) { console.error('Error fetching romance prompt responses:', error) return NextResponse.json( { success: false, error: 'Failed to fetch responses' }, { status: 500 } ) } } // POST - Create a new response to a romance prompt export async function POST(request: NextRequest, context: Context) { try { // Rate limiting for story submissions const rateLimitResult = await checkRateLimit(request, 'romance_story_submission') if (!rateLimitResult.allowed) { return NextResponse.json( { success: false, error: 'Too many story submissions. Please wait before submitting again.' 
}, { status: 429, headers: getRateLimitHeaders(rateLimitResult) } ) } const params = await context.params const { id: promptId } = params const body = await request.json() const { title, content, isPublic = false, storyCompletionLevel = 'beginning', heatLevelWritten = 'sweet', tags = [], writingTimeMinutes } = body // Get session info const sessionId = request.headers.get('X-Session-ID') || 'anonymous' const userId = request.headers.get('X-User-ID') || null // Validate required fields if (!title || !content) { return NextResponse.json( { success: false, error: 'Title and content are required' }, { status: 400 } ) } // Content validation const contentValidation = validateApiContent({ title, content }) if (!contentValidation.isValid) { return NextResponse.json( { success: false, error: 'Content validation failed', details: contentValidation.errors }, { status: 400 } ) } // Sanitize content const sanitizedTitle = sanitizeContent(title.trim()) const sanitizedContent = sanitizeContent(content.trim()) const wordCount = sanitizedContent.split(/\s+/).length const excerpt = sanitizedContent.substring(0, 200) const supabase = getSupabaseTools() // Insert response const { data: response, error } = await supabase .from('tools_romance_prompt_responses') .insert({ prompt_id: promptId, user_id: userId, session_id: sessionId, title: sanitizedTitle, content: sanitizedContent, excerpt, word_count: wordCount, story_completion_level: storyCompletionLevel, heat_level_written: heatLevelWritten, tags, is_public: isPublic, writing_time_minutes: writingTimeMinutes, // Auto-generate SEO for public responses seo_title: isPublic ? `${sanitizedTitle} - Romance Story Response` : null, seo_description: isPublic ? excerpt : null, keywords: isPublic ? ['romance story', 'creative writing', heatLevelWritten, ...tags.slice(0, 3)] : null }) .select() .single() if (error) throw error // Update prompt's use count (supabase-js has no raw(); read the current value and write it back, or use a Postgres function via rpc() for an atomic increment, as sketched below) const { data: promptRow } = await supabase .from('tools_romance_prompts') .select('use_count') .eq('id', promptId) .single() await supabase .from('tools_romance_prompts') .update({ use_count: (promptRow?.use_count ?? 0) + 1 }) .eq('id', promptId) // Log analytics await supabase .from('tools_romance_prompt_analytics') .insert({ prompt_id: promptId, event_type: 'response_created', event_data: { responseId: response.id, wordCount, completionLevel: storyCompletionLevel, heatLevel: heatLevelWritten, isPublic, writingTime: writingTimeMinutes }, user_id: userId, session_id: sessionId, ip_address: request.headers.get('x-forwarded-for') || 'unknown', user_agent: request.headers.get('user-agent') || 'unknown' }) return NextResponse.json({ success: true, data: { id: response.id, title: response.title, excerpt: response.excerpt, wordCount: response.word_count, isPublic: response.is_public, createdAt: response.created_at } }) } catch (error) { console.error('Error creating romance story response:', error) return NextResponse.json( { success: false, error: 'Failed to create response' }, { status: 500 } ) } }
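
The read-modify-write used for use_count above is simple but not atomic under concurrent submissions. If that matters, one option is a small Postgres function called through rpc(); the function name below is hypothetical and would need to be created in a migration:

import { getSupabaseTools } from '@/lib/supabase'

// Hypothetical atomic alternative: assumes a Postgres function
// increment_prompt_use_count(p_prompt_id uuid) has been added in a migration.
async function incrementUseCount(promptId: string): Promise<void> {
  const supabase = getSupabaseTools()
  const { error } = await supabase.rpc('increment_prompt_use_count', { p_prompt_id: promptId })
  if (error) throw error
}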

Admin Management System

1. Bulk Generation Endpoint

// File: src/app/api/admin/romance-prompts/bulk-generate/route.ts import { NextRequest, NextResponse } from 'next/server' import { getSupabaseTools } from '@/lib/supabase' import { RomancePromptsAI } from '@/lib/romance-prompts-ai' import { trackAIUsage, getRecommendedToolsModel } from '@/lib/ai-usage-tracker' export async function POST(request: NextRequest) { const startTime = Date.now() const sessionId = `admin-bulk-${Date.now()}` try { const body = await request.json() const { collectionId, count = 20, romanceSubgenre, heatLevel, settingEra, characterArchetypes, relationshipDynamic, tropes, theme } = body // Validate admin access (implement your admin auth logic) const isAdmin = await validateAdminAccess(request) if (!isAdmin) { return NextResponse.json( { success: false, error: 'Admin access required' }, { status: 403 } ) } if (count > 100) { return NextResponse.json( { success: false, error: 'Maximum 100 prompts per bulk operation' }, { status: 400 } ) } const supabase = getSupabaseTools() // Get collection details if provided let collection = null if (collectionId) { const { data: collectionData, error } = await supabase .from('tools_romance_prompt_collections') .select('*') .eq('id', collectionId) .single() if (error) { return NextResponse.json( { success: false, error: 'Collection not found' }, { status: 404 } ) } collection = collectionData } // Generate prompts in batches to avoid timeouts const batchSize = 10 const batches = Math.ceil(count / batchSize) const allPrompts = [] const romanceAI = new RomancePromptsAI() for (let i = 0; i < batches; i++) { const batchCount = Math.min(batchSize, count - (i * batchSize)) const options = { count: batchCount, romanceSubgenre, heatLevel, settingEra, characterArchetypes, relationshipDynamic, tropes, conflictType: 'both', wordCountRange: '1000-2500', timeEstimate: '1-2 hours', customTheme: theme || collection?.theme } const batchPrompts = await romanceAI.generateRomancePrompts( options, 'admin', sessionId ) allPrompts.push(...batchPrompts) } // Save prompts to database const savedPrompts = [] for (const [index, prompt] of allPrompts.entries()) { const { data, error } = await supabase .from('tools_romance_prompts') .insert({ title: `${prompt.romanceSubgenre} Romance Prompt #${index + 1}`, share_code: `admin-${Date.now()}-${index}`, session_id: sessionId, is_public: true, is_reviewed: true, // Admin-generated content is pre-reviewed prompt_text: prompt.promptText, romance_subgenre: prompt.romanceSubgenre, heat_level: prompt.heatLevel, setting_era: prompt.settingEra, character_archetypes: prompt.characterArchetypes, relationship_dynamic: prompt.relationshipDynamic, tropes: prompt.tropes, conflict_type: prompt.conflictType, emotional_beats: prompt.emotionalBeats, word_count_range: prompt.wordCountRange, time_estimate: prompt.timeEstimate, content: prompt.content, seo_title: prompt.seoTitle, seo_description: prompt.seoDescription, keywords: prompt.keywords, is_ai_generated: true, ai_generation_prompt: prompt.aiGenerationPrompt, ai_confidence: prompt.aiConfidence }) .select() .single() if (!error && data) { savedPrompts.push(data) // Add to collection if specified if (collectionId) { await supabase .from('tools_romance_collection_prompts') .insert({ collection_id: collectionId, prompt_id: data.id, sort_order: index }) } } } // Update collection prompt count (supabase-js has no raw(); reuse the count fetched with the collection above) if (collectionId && collection) { await supabase .from('tools_romance_prompt_collections') .update({ prompt_count: (collection.prompt_count ?? 0) + savedPrompts.length }) .eq('id', collectionId) } const totalTime = Date.now() - startTime return NextResponse.json({ success: true, data: { generated: allPrompts.length, saved: savedPrompts.length, collectionId, processingTime: totalTime } }) } catch (error) { console.error('Bulk generation error:', error) return NextResponse.json( { success: false, error: 'Failed to generate prompts' }, { status: 500 } ) } } async function validateAdminAccess(request: NextRequest): Promise<boolean> { // Implement your admin authentication logic const adminKey = request.headers.get('x-admin-key') return adminKey === process.env.ADMIN_SECRET_KEY }
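
Calling the bulk endpoint from an admin script could look like the following. The base URL is illustrative, and x-admin-key must match the ADMIN_SECRET_KEY checked in validateAdminAccess above:

// Kick off a themed bulk generation run (30 Regency prompts for a Valentine's collection).
async function bulkGenerateRegencyPrompts() {
  const res = await fetch('https://mystoryflow.com/api/admin/romance-prompts/bulk-generate', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-admin-key': process.env.ADMIN_SECRET_KEY ?? ''
    },
    body: JSON.stringify({
      count: 30,
      romanceSubgenre: 'regency',
      heatLevel: 'sweet',
      settingEra: 'regency',
      characterArchetypes: ['shy_heroine'],
      relationshipDynamic: 'fake_dating',
      tropes: ['marriage_of_convenience'],
      theme: "Valentine's Day"
    })
  })
  console.log(await res.json()) // { success, data: { generated, saved, collectionId, processingTime } }
}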

Frontend Components

1. Main Generator Component

// File: src/components/romance-prompts/RomancePromptGenerator.tsx 'use client' import React, { useState } from 'react' import { Button } from '@mystoryflow/ui' import { Input } from '@mystoryflow/ui' import { Select, SelectContent, SelectItem, SelectTrigger, SelectValue } from '@mystoryflow/ui' import { Textarea } from '@mystoryflow/ui' import { Checkbox } from '@mystoryflow/ui' import { Card, CardContent, CardHeader, CardTitle } from '@mystoryflow/ui' import { Badge } from '@mystoryflow/ui' import { Loader2, Heart, BookOpen, Users, Zap } from 'lucide-react' interface RomancePromptGeneratorProps { onPromptsGenerated?: (prompts: any[]) => void } export const RomancePromptGenerator: React.FC<RomancePromptGeneratorProps> = ({ onPromptsGenerated }) => { const [formData, setFormData] = useState({ count: 5, romanceSubgenre: 'contemporary', heatLevel: 'steamy', settingEra: 'modern', characterArchetypes: ['alpha_hero', 'strong_heroine'], relationshipDynamic: 'enemies_to_lovers', tropes: ['forced_proximity', 'only_one_bed'], conflictType: 'both', wordCountRange: '1000-2500', timeEstimate: '1-2 hours', customTheme: '', isPublic: false }) const [isGenerating, setIsGenerating] = useState(false) const [generatedPrompts, setGeneratedPrompts] = useState<any[]>([]) const [error, setError] = useState<string | null>(null) const subgenres = [ { value: 'contemporary', label: 'Contemporary Romance' }, { value: 'historical', label: 'Historical Romance' }, { value: 'paranormal', label: 'Paranormal Romance' }, { value: 'fantasy', label: 'Fantasy Romance' }, { value: 'western', label: 'Western Romance' }, { value: 'regency', label: 'Regency Romance' } ] const heatLevels = [ { value: 'sweet', label: 'Sweet (No explicit content)' }, { value: 'steamy', label: 'Steamy (Some heat)' }, { value: 'erotic', label: 'Erotic (Explicit content)' } ] const archetypes = [ 'alpha_hero', 'beta_hero', 'strong_heroine', 'shy_heroine', 'bad_boy', 'good_girl', 'wounded_hero', 'feisty_heroine' ] const dynamics = [ { value: 'enemies_to_lovers', label: 'Enemies to Lovers' }, { value: 'friends_to_lovers', label: 'Friends to Lovers' }, { value: 'second_chance', label: 'Second Chance Romance' }, { value: 'fake_dating', label: 'Fake Dating' }, { value: 'forced_proximity', label: 'Forced Proximity' }, { value: 'workplace_romance', label: 'Workplace Romance' } ] const availableTropes = [ 'instalove', 'slow_burn', 'forbidden_love', 'only_one_bed', 'marriage_of_convenience', 'secret_baby', 'amnesia', 'bodyguard' ] const handleInputChange = (field: string, value: any) => { setFormData(prev => ({ ...prev, [field]: value })) } const handleArrayChange = (field: string, value: string, checked: boolean) => { setFormData(prev => ({ ...prev, [field]: checked ? [...prev[field as keyof typeof prev] as string[], value] : (prev[field as keyof typeof prev] as string[]).filter(item => item !== value) })) } const handleGenerate = async () => { setIsGenerating(true) setError(null) try { const response = await fetch('/api/romance-prompts/generate', { method: 'POST', headers: { 'Content-Type': 'application/json', 'X-Session-ID': sessionStorage.getItem('sessionId') || 'anonymous' }, body: JSON.stringify(formData) }) const result = await response.json() if (!result.success) { throw new Error(result.error || 'Failed to generate prompts') } setGeneratedPrompts(result.data.prompts) onPromptsGenerated?.(result.data.prompts) } catch (err) { setError(err instanceof Error ? 
err.message : 'An error occurred') } finally { setIsGenerating(false) } } return ( <div className="max-w-4xl mx-auto space-y-8"> {/* Header */} <div className="text-center"> <h1 className="text-4xl font-bold text-slate-900 mb-4"> Romance Writing Prompts Generator </h1> <p className="text-xl text-slate-600 max-w-3xl mx-auto"> Create compelling romance writing prompts tailored to your favorite tropes, heat levels, and relationship dynamics. Perfect for writers looking for their next swoon-worthy story. </p> </div> {/* Generator Form */} <Card> <CardHeader> <CardTitle className="flex items-center gap-2"> <Heart className="w-5 h-5 text-rose-500" /> Romance Prompt Settings </CardTitle> </CardHeader> <CardContent className="space-y-6"> {/* Basic Settings */} <div className="grid md:grid-cols-2 gap-4"> <div> <label className="block text-sm font-medium mb-2"> Number of Prompts </label> <Select value={formData.count.toString()} onValueChange={(value) => handleInputChange('count', parseInt(value))} > <SelectTrigger size="sm"> <SelectValue /> </SelectTrigger> <SelectContent> <SelectItem value="3">3 prompts</SelectItem> <SelectItem value="5">5 prompts</SelectItem> <SelectItem value="7">7 prompts</SelectItem> <SelectItem value="10">10 prompts</SelectItem> </SelectContent> </Select> </div> <div> <label className="block text-sm font-medium mb-2"> Romance Subgenre </label> <Select value={formData.romanceSubgenre} onValueChange={(value) => handleInputChange('romanceSubgenre', value)} > <SelectTrigger size="sm"> <SelectValue /> </SelectTrigger> <SelectContent> {subgenres.map(genre => ( <SelectItem key={genre.value} value={genre.value}> {genre.label} </SelectItem> ))} </SelectContent> </Select> </div> </div> {/* Heat Level & Setting */} <div className="grid md:grid-cols-2 gap-4"> <div> <label className="block text-sm font-medium mb-2"> Heat Level </label> <Select value={formData.heatLevel} onValueChange={(value) => handleInputChange('heatLevel', value)} > <SelectTrigger size="sm"> <SelectValue /> </SelectTrigger> <SelectContent> {heatLevels.map(level => ( <SelectItem key={level.value} value={level.value}> {level.label} </SelectItem> ))} </SelectContent> </Select> </div> <div> <label className="block text-sm font-medium mb-2"> Relationship Dynamic </label> <Select value={formData.relationshipDynamic} onValueChange={(value) => handleInputChange('relationshipDynamic', value)} > <SelectTrigger size="sm"> <SelectValue /> </SelectTrigger> <SelectContent> {dynamics.map(dynamic => ( <SelectItem key={dynamic.value} value={dynamic.value}> {dynamic.label} </SelectItem> ))} </SelectContent> </Select> </div> </div> {/* Character Archetypes */} <div> <label className="block text-sm font-medium mb-3"> Character Archetypes </label> <div className="grid grid-cols-2 md:grid-cols-4 gap-2"> {archetypes.map(archetype => ( <div key={archetype} className="flex items-center space-x-2"> <Checkbox id={archetype} checked={formData.characterArchetypes.includes(archetype)} onCheckedChange={(checked) => handleArrayChange('characterArchetypes', archetype, checked as boolean) } /> <label htmlFor={archetype} className="text-sm"> {archetype.replace('_', ' ').replace(/\b\w/g, l => l.toUpperCase())} </label> </div> ))} </div> </div> {/* Romance Tropes */} <div> <label className="block text-sm font-medium mb-3"> Romance Tropes </label> <div className="grid grid-cols-2 md:grid-cols-4 gap-2"> {availableTropes.map(trope => ( <div key={trope} className="flex items-center space-x-2"> <Checkbox id={trope} checked={formData.tropes.includes(trope)} 
onCheckedChange={(checked) => handleArrayChange('tropes', trope, checked as boolean) } /> <label htmlFor={trope} className="text-sm"> {trope.replace('_', ' ').replace(/\b\w/g, l => l.toUpperCase())} </label> </div> ))} </div> </div> {/* Custom Theme */} <div> <label className="block text-sm font-medium mb-2"> Custom Theme (Optional) </label> <Input size="sm" placeholder="e.g., Valentine's Day, Christmas romance, beach vacation..." value={formData.customTheme} onChange={(e) => handleInputChange('customTheme', e.target.value)} /> </div> {/* Public Setting */} <div className="flex items-center space-x-2"> <Checkbox id="isPublic" checked={formData.isPublic} onCheckedChange={(checked) => handleInputChange('isPublic', checked)} /> <label htmlFor="isPublic" className="text-sm"> Make prompts publicly discoverable </label> </div> {/* Generate Button */} <Button onClick={handleGenerate} disabled={isGenerating || formData.characterArchetypes.length === 0 || formData.tropes.length === 0} className="w-full" size="lg" > {isGenerating ? ( <> <Loader2 className="w-4 h-4 mr-2 animate-spin" /> Generating Romance Prompts... </> ) : ( <> <Heart className="w-4 h-4 mr-2" /> Generate {formData.count} Romance Prompts </> )} </Button> {error && ( <div className="p-4 bg-red-50 border border-red-200 rounded-lg"> <p className="text-red-600 text-sm">{error}</p> </div> )} </CardContent> </Card> {/* Generated Prompts */} {generatedPrompts.length > 0 && ( <div className="space-y-6"> <h2 className="text-2xl font-bold text-slate-900"> Your Romance Writing Prompts </h2> <div className="grid gap-6"> {generatedPrompts.map((prompt, index) => ( <Card key={prompt.id} className="border-rose-200"> <CardHeader> <CardTitle className="flex items-start justify-between"> <span className="text-lg">Prompt #{index + 1}</span> <div className="flex gap-2"> <Badge variant="secondary">{prompt.romanceSubgenre}</Badge> <Badge variant="outline">{prompt.heatLevel}</Badge> </div> </CardTitle> </CardHeader> <CardContent className="space-y-4"> {/* Main Prompt */} <div className="p-4 bg-rose-50 rounded-lg"> <p className="text-slate-800 leading-relaxed"> {prompt.promptText} </p> </div> {/* Content Structure */} {prompt.content && ( <div className="space-y-3"> {prompt.content.hookLine && ( <div> <h4 className="font-medium text-slate-900 mb-1"> Opening Hook: </h4> <p className="text-slate-700 italic"> "{prompt.content.hookLine}" </p> </div> )} {prompt.content.characterSetup && ( <div> <h4 className="font-medium text-slate-900 mb-1"> Character Setup: </h4> <p className="text-slate-700"> {prompt.content.characterSetup} </p> </div> )} {prompt.content.conflictSeed && ( <div> <h4 className="font-medium text-slate-900 mb-1"> Central Conflict: </h4> <p className="text-slate-700"> {prompt.content.conflictSeed} </p> </div> )} </div> )} {/* Tropes */} <div> <h4 className="font-medium text-slate-900 mb-2"> Romance Tropes: </h4> <div className="flex flex-wrap gap-2"> {prompt.tropes.map((trope: string) => ( <Badge key={trope} variant="outline" className="text-xs"> {trope.replace('_', ' ')} </Badge> ))} </div> </div> {/* Writing Tips */} {prompt.content?.writingTips && prompt.content.writingTips.length > 0 && ( <div> <h4 className="font-medium text-slate-900 mb-2"> Writing Tips: </h4> <ul className="list-disc list-inside space-y-1 text-slate-700"> {prompt.content.writingTips.map((tip: string, tipIndex: number) => ( <li key={tipIndex} className="text-sm">{tip}</li> ))} </ul> </div> )} {/* Action Buttons */} <div className="flex gap-2 pt-4 border-t"> <Button 
variant="outline" size="sm"> <BookOpen className="w-4 h-4 mr-2" /> Write Response </Button> <Button variant="outline" size="sm"> <Users className="w-4 h-4 mr-2" /> Share </Button> <Button variant="outline" size="sm"> <Zap className="w-4 h-4 mr-2" /> Generate Similar </Button> </div> </CardContent> </Card> ))} </div> </div> )} </div> ) }

Summary

This comprehensive example demonstrates how the Romance Writing Prompts Generator implements all aspects of the 16-tool framework:

Database Architecture

  • Five specialized tables (four core tables plus a collection-prompt junction) with proper relationships
  • Romance-specific fields (subgenre, heat level, tropes)
  • SEO optimization and analytics tracking
  • User response system for community engagement

AI Integration

  • Multi-variant generation with romance expertise
  • Comprehensive admin tracking of all AI usage
  • Cost optimization with token counting
  • Error handling with AI failure tracking via trackAIUsage

API Implementation

  • Standard endpoint structure with proper validation
  • Rate limiting and security measures
  • CORS support for web-app integration
  • Comprehensive error handling

Admin Management

  • Bulk generation capabilities
  • Quality review and featuring systems
  • Analytics and performance tracking
  • Collection management for themed sets

Frontend Components

  • Comprehensive form with romance-specific options
  • Real-time validation and user feedback
  • Mobile-responsive design
  • Integration with MyStoryFlow UI system

This pattern can be replicated for all 16 tools with tool-specific modifications, providing 80% code reuse while maintaining specialized functionality for each domain.
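
One way to picture that reuse is a shared base record shape that each tool extends with its own fields. The interfaces below are a sketch inferred from the schema in this guide, not types defined by the framework:

// The shared ~80%: fields every tool table carries.
interface BaseToolRecord {
  id: string
  title: string
  share_code: string
  user_id: string | null
  session_id: string
  is_public: boolean
  is_featured: boolean
  is_reviewed: boolean
  view_count: number
  share_count: number
  seo_title: string | null
  seo_description: string | null
  keywords: string[] | null
  is_ai_generated: boolean
  ai_confidence: number | null
  created_at: string
  updated_at: string
  expires_at: string | null
}

// The tool-specific ~20%: romance fields layered on the shared base.
interface RomancePromptRecord extends BaseToolRecord {
  prompt_text: string
  romance_subgenre: string | null
  heat_level: string | null
  setting_era: string | null
  tropes: string[] | null
  relationship_dynamic: string | null
  content: Record<string, unknown>
}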