F029 - Comprehensive Testing Framework
Objective
Implement a robust testing framework covering unit tests, integration tests, and end-to-end tests to ensure reliability, performance, and maintainability of the manuscript analysis platform.
Quick Implementation
Using NextSaaS Testing Patterns
- Jest configuration
- React Testing Library setup
- Playwright for E2E
- Test utilities and mocks
- CI/CD integration
New Requirements
- AI service mocking
- File upload testing
- Real-time features testing
- Performance benchmarks
MVP Implementation
1. Testing Infrastructure Setup
// package.json
{
  "scripts": {
    "test": "jest",
    "test:watch": "jest --watch",
    "test:coverage": "jest --coverage",
    "test:e2e": "playwright test",
    "test:e2e:ui": "playwright test --ui",
    "test:integration": "jest --testPathPattern=integration",
    "test:ci": "npm run test:coverage && npm run test:e2e"
  },
  "devDependencies": {
    "@testing-library/react": "^14.0.0",
    "@testing-library/jest-dom": "^6.0.0",
    "@testing-library/user-event": "^14.0.0",
    "@playwright/test": "^1.40.0",
    "jest": "^29.7.0",
    "jest-environment-jsdom": "^29.7.0",
    "jest-axe": "^8.0.0",
    "@types/jest-axe": "^3.5.9",
    "msw": "^2.0.0",
    "@types/jest": "^29.5.0"
  }
}
2. Jest Configuration
// jest.config.js
const nextJest = require('next/jest')

// next/jest wires up the Next.js compiler (SWC transform, CSS modules,
// .env loading) so tests run under the same configuration as the app.
const createJestConfig = nextJest({
  dir: './',
})

const customJestConfig = {
  // Runs once per test file, after the test environment is installed.
  setupFilesAfterEnv: ['<rootDir>/jest.setup.js'],
  // Mirror the tsconfig path aliases so `@/...` and `@mystoryflow/...`
  // imports resolve inside tests.
  moduleNameMapper: {
    '^@/(.*)$': '<rootDir>/src/$1',
    '^@mystoryflow/(.*)$': '<rootDir>/packages/$1',
  },
  testEnvironment: 'jest-environment-jsdom',
  testPathIgnorePatterns: ['<rootDir>/.next/', '<rootDir>/node_modules/'],
  // Fail the run when global coverage drops below 80% in any dimension.
  coverageThreshold: {
    global: {
      branches: 80,
      functions: 80,
      lines: 80,
      statements: 80,
    },
  },
  // Count coverage for app + workspace packages, excluding type declarations,
  // Storybook stories, and barrel re-export files.
  collectCoverageFrom: [
    'src/**/*.{js,jsx,ts,tsx}',
    'packages/**/*.{js,jsx,ts,tsx}',
    '!src/**/*.d.ts',
    '!src/**/*.stories.tsx',
    '!src/**/index.ts',
  ],
}
module.exports = createJestConfig(customJestConfig)
3. Test Setup and Utilities
// jest.setup.js
// Global test setup: custom DOM matchers, MSW API mocking lifecycle,
// and stubbed environment variables.
import '@testing-library/jest-dom'
import { server } from './src/test/mocks/server'
// Establish API mocking before all tests
beforeAll(() => server.listen())
// Reset any request handlers that we may add during the tests,
// so they don't affect other tests
afterEach(() => server.resetHandlers())
// Clean up after the tests are finished
afterAll(() => server.close())
// Mock environment variables so client code that reads them at import
// time does not throw; values are dummies, never real credentials.
process.env.NEXT_PUBLIC_SUPABASE_URL = 'http://localhost:54321'
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY = 'test-key'
process.env.OPENAI_API_KEY = 'test-openai-key'
process.env.ANTHROPIC_API_KEY = 'test-anthropic-key'
4. AI Service Mocks
// src/test/mocks/ai-service.ts
// MSW request handlers that stub the external AI providers so tests never
// hit real (billed) APIs. NOTE: package.json pins `msw ^2.0.0`, and the v1
// `rest` namespace was removed in MSW 2.x — these handlers therefore use
// the v2 `http` + `HttpResponse` API.
import { http, HttpResponse } from 'msw'

export const aiHandlers = [
  // OpenAI mock: canned chat-completion whose message content is the
  // stringified JSON analysis payload the analysis engine parses.
  http.post('https://api.openai.com/v1/chat/completions', () => {
    return HttpResponse.json({
      id: 'test-completion-id',
      object: 'chat.completion',
      created: Date.now(),
      model: 'gpt-4',
      choices: [{
        index: 0,
        message: {
          role: 'assistant',
          content: JSON.stringify({
            overall_score: 85,
            category_scores: {
              structure: 88,
              character_development: 82,
              dialogue: 90,
              pacing: 78,
              plot_and_conflict: 85,
              writing_craft: 87
            },
            strengths: [
              'Compelling protagonist with clear motivations',
              'Natural, engaging dialogue',
              'Well-structured three-act format'
            ],
            weaknesses: [
              'Middle section pacing could be tighter',
              'Secondary characters need more development'
            ]
          })
        },
        finish_reason: 'stop'
      }]
    })
  }),

  // Claude mock: canned Anthropic Messages response carrying a genre
  // classification payload.
  http.post('https://api.anthropic.com/v1/messages', () => {
    return HttpResponse.json({
      id: 'test-claude-id',
      type: 'message',
      role: 'assistant',
      content: [{
        type: 'text',
        text: JSON.stringify({
          genre: 'mystery',
          confidence: 0.92,
          subgenres: ['cozy_mystery', 'amateur_sleuth'],
          reasoning: 'Small town setting with amateur detective protagonist'
        })
      }]
    })
  })
]
5. Component Unit Tests
// src/components/analysis/ScoreOverview.test.tsx
import { render, screen } from '@testing-library/react'
import { ScoreOverview } from './ScoreOverview'

describe('ScoreOverview', () => {
  // Baseline props shared by every case; individual tests override
  // specific fields via spread + rerender.
  const baseProps = {
    overallScore: 85,
    categoryScores: {
      structure: 88,
      character_development: 82,
      dialogue: 90,
      pacing: 78
    },
    publishingReadiness: {
      level: 'ready',
      description: 'Your manuscript is ready for submission',
      timeToMarket: '1-2 months'
    }
  }

  it('renders overall score correctly', () => {
    render(<ScoreOverview {...baseProps} />)
    expect(screen.getByText('85%')).toBeInTheDocument()
    expect(screen.getByText('Excellent')).toBeInTheDocument()
  })

  it('displays correct score color based on value', () => {
    const { rerender } = render(<ScoreOverview {...baseProps} />)
    // 85 falls in the "green" band
    expect(screen.getByText('85%')).toHaveStyle({ color: '#10b981' })
    // 65 drops into the "yellow" band
    rerender(<ScoreOverview {...baseProps} overallScore={65} />)
    expect(screen.getByText('65%')).toHaveStyle({ color: '#f59e0b' })
  })

  it('shows all category scores', () => {
    render(<ScoreOverview {...baseProps} />)
    expect(screen.getByText('STRUCTURE')).toBeInTheDocument()
    expect(screen.getByText('88%')).toBeInTheDocument()
    expect(screen.getByText('CHARACTER DEVELOPMENT')).toBeInTheDocument()
    expect(screen.getByText('82%')).toBeInTheDocument()
  })

  it('displays publishing readiness badge correctly', () => {
    render(<ScoreOverview {...baseProps} />)
    expect(screen.getByText('READY')).toBeInTheDocument()
    expect(screen.getByText('1-2 months')).toBeInTheDocument()
  })
})
})
6. Service Integration Tests
// src/services/__tests__/manuscript-analysis.integration.test.ts
import { ManuscriptAnalysisEngine } from '@mystoryflow/manuscript-analysis'
import { createClient } from '@mystoryflow/database/server'
import { mockManuscriptContent } from '@/test/fixtures/manuscripts'
jest.mock('@mystoryflow/database/server')
describe('ManuscriptAnalysisEngine Integration', () => {
let engine: ManuscriptAnalysisEngine
let mockSupabase: any
beforeEach(() => {
mockSupabase = {
from: jest.fn().mockReturnThis(),
select: jest.fn().mockReturnThis(),
insert: jest.fn().mockReturnThis(),
update: jest.fn().mockReturnThis(),
eq: jest.fn().mockReturnThis(),
single: jest.fn()
}
;(createClient as jest.Mock).mockReturnValue(mockSupabase)
engine = new ManuscriptAnalysisEngine()
})
it('analyzes manuscript and stores results', async () => {
const manuscriptId = 'test-manuscript-id'
const userId = 'test-user-id'
mockSupabase.single.mockResolvedValueOnce({
data: { content: mockManuscriptContent }
})
const result = await engine.analyzeManuscript(manuscriptId, userId)
expect(result).toMatchObject({
overall_score: expect.any(Number),
category_scores: expect.any(Object),
strengths: expect.any(Array),
weaknesses: expect.any(Array)
})
expect(mockSupabase.insert).toHaveBeenCalledWith(
expect.objectContaining({
manuscript_id: manuscriptId,
overall_score: expect.any(Number)
})
)
})
it('handles analysis errors gracefully', async () => {
mockSupabase.single.mockRejectedValueOnce(new Error('Database error'))
await expect(
engine.analyzeManuscript('bad-id', 'user-id')
).rejects.toThrow('Failed to retrieve manuscript')
})
it('respects word count limits', async () => {
const longContent = 'word '.repeat(200000) // 200k words
mockSupabase.single.mockResolvedValueOnce({
data: { content: longContent }
})
await expect(
engine.analyzeManuscript('test-id', 'user-id')
).rejects.toThrow('Manuscript exceeds maximum word count')
})
})
7. E2E Tests with Playwright
// e2e/manuscript-upload.spec.ts
import { test, expect } from '@playwright/test'
import path from 'path'
test.describe('Manuscript Upload Flow', () => {
test.beforeEach(async ({ page }) => {
// Login
await page.goto('/auth/sign-in')
await page.fill('[name="email"]', 'test@example.com')
await page.fill('[name="password"]', 'testpassword')
await page.click('[type="submit"]')
await page.waitForURL('/dashboard')
})
test('uploads and analyzes manuscript successfully', async ({ page }) => {
await page.goto('/manuscripts/new')
// Upload file
const fileInput = page.locator('input[type="file"]')
await fileInput.setInputFiles(
path.join(__dirname, 'fixtures', 'sample-manuscript.pdf')
)
// Fill metadata
await page.fill('[name="title"]', 'Test Manuscript')
await page.selectOption('[name="genre"]', 'mystery')
await page.fill('[name="wordCount"]', '75000')
// Submit
await page.click('button:has-text("Start Analysis")')
// Wait for upload
await expect(page.locator('[data-testid="upload-progress"]'))
.toHaveAttribute('aria-valuenow', '100')
// Wait for analysis to complete
await page.waitForURL(/\/manuscripts\/.*\/analysis/, { timeout: 60000 })
// Verify results page
await expect(page.locator('h1')).toContainText('Analysis Complete')
await expect(page.locator('[data-testid="overall-score"]')).toBeVisible()
await expect(page.locator('[data-testid="download-report"]')).toBeEnabled()
})
test('handles upload errors gracefully', async ({ page }) => {
await page.goto('/manuscripts/new')
// Try to upload invalid file type
const fileInput = page.locator('input[type="file"]')
await fileInput.setInputFiles(
path.join(__dirname, 'fixtures', 'invalid-file.exe')
)
await expect(page.locator('[role="alert"]'))
.toContainText('Please upload a valid document')
})
test('enforces word count limits', async ({ page }) => {
await page.goto('/manuscripts/new')
// Upload file
await page.locator('input[type="file"]').setInputFiles(
path.join(__dirname, 'fixtures', 'sample-manuscript.pdf')
)
// Set word count beyond limit
await page.fill('[name="wordCount"]', '500000')
const submitButton = page.locator('button:has-text("Start Analysis")')
await expect(submitButton).toBeDisabled()
await expect(page.locator('[data-testid="limit-warning"]'))
.toContainText('exceeds the maximum')
})
})
8. Performance Testing
// e2e/performance.spec.ts
import { test, expect } from '@playwright/test'
test.describe('Performance Benchmarks', () => {
test('dashboard loads within performance budget', async ({ page }) => {
await page.goto('/dashboard')
const metrics = await page.evaluate(() => {
const navigation = performance.getEntriesByType('navigation')[0] as PerformanceNavigationTiming
return {
domContentLoaded: navigation.domContentLoadedEventEnd - navigation.domContentLoadedEventStart,
loadComplete: navigation.loadEventEnd - navigation.loadEventStart,
firstContentfulPaint: performance.getEntriesByName('first-contentful-paint')[0]?.startTime
}
})
expect(metrics.domContentLoaded).toBeLessThan(1000) // 1s
expect(metrics.loadComplete).toBeLessThan(3000) // 3s
expect(metrics.firstContentfulPaint).toBeLessThan(1500) // 1.5s
})
test('analysis page handles large content efficiently', async ({ page }) => {
await page.goto('/manuscripts/test-id/analysis')
// Measure time to interactive
const startTime = Date.now()
await page.waitForLoadState('networkidle')
const loadTime = Date.now() - startTime
expect(loadTime).toBeLessThan(5000) // 5s max
// Check memory usage
const metrics = await page.metrics()
expect(metrics.JSHeapUsedSize).toBeLessThan(50 * 1024 * 1024) // 50MB
})
})
9. Accessibility Testing
// src/components/__tests__/accessibility.test.tsx
import { render } from '@testing-library/react'
import { axe, toHaveNoViolations } from 'jest-axe'
import { ManuscriptUploader } from '@/components/upload/ManuscriptUploader'
import { AnalysisReport } from '@/components/analysis/AnalysisReport'

// Register the .toHaveNoViolations() matcher with Jest.
expect.extend(toHaveNoViolations)

describe('Accessibility Tests', () => {
  it('ManuscriptUploader has no accessibility violations', async () => {
    const { container } = render(<ManuscriptUploader onUpload={jest.fn()} />)
    expect(await axe(container)).toHaveNoViolations()
  })

  it('AnalysisReport has no accessibility violations', async () => {
    // Minimal report fixture — enough for the component to render fully.
    const reportFixture = {
      overall_score: 85,
      category_scores: { structure: 90 },
      strengths: ['Good pacing'],
      weaknesses: ['Character depth']
    }
    const { container } = render(<AnalysisReport data={reportFixture} />)
    expect(await axe(container)).toHaveNoViolations()
  })

  it('forms have proper labels and ARIA attributes', async () => {
    const { getByLabelText } = render(<ManuscriptUploader onUpload={jest.fn()} />)
    const fileField = getByLabelText('Upload your manuscript')
    expect(fileField).toBeInTheDocument()
    expect(fileField).toHaveAttribute('aria-required', 'true')
  })
})
})
10. CI/CD Test Configuration
# .github/workflows/test.yml
# Runs unit tests with coverage, then Playwright E2E, on pushes and PRs.
name: Test Suite

on:
  push:
    branches: [main, develop]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest

    services:
      postgres:
        image: postgres:15
        env:
          POSTGRES_PASSWORD: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - 5432:5432

    steps:
      - uses: actions/checkout@v3

      - name: Setup Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '18'
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      - name: Run unit tests
        run: npm run test:coverage
        env:
          DATABASE_URL: postgresql://postgres:postgres@localhost:5432/test

      - name: Upload coverage
        uses: codecov/codecov-action@v3
        with:
          file: ./coverage/lcov.info

      # The E2E suite targets http://localhost:3000, so the app must exist
      # before Playwright runs. Build here; playwright.config.ts should
      # declare a `webServer` entry (command: 'npm run start', port: 3000)
      # so Playwright starts the server and waits for it to be ready.
      - name: Build application
        run: npm run build

      - name: Install Playwright
        run: npx playwright install --with-deps

      - name: Run E2E tests
        run: npm run test:e2e
        env:
          PLAYWRIGHT_TEST_BASE_URL: http://localhost:3000

      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v3
        with:
          name: test-results
          path: |
            coverage/
            playwright-report/
test-results/
MVP Acceptance Criteria
- 80%+ code coverage across all packages
- Unit tests for all components
- Integration tests for services
- E2E tests for critical user flows
- AI service mocking
- Performance benchmarks
- Accessibility testing
- CI/CD integration
- Test documentation
- Error scenario coverage
Post-MVP Enhancements
- Visual regression testing
- Load testing with K6
- Security testing suite
- Mutation testing
- Contract testing for APIs
- Mobile device testing
- Cross-browser matrix
- Chaos engineering tests
- Test data generation
- Automated test reporting
Implementation Time
- Development: 3 days
- Testing: 1 day
- Total: 4 days
Dependencies
- Test environment setup
- Mock data creation
- CI/CD pipeline configuration
- Test database setup
Next Feature
After completion, proceed to F030-PERFORMANCE-OPTIMIZATION for speed improvements.