import { NextRequest, NextResponse } from 'next/server'
import { getOpenRouterModel, getModelTier, MODELS } from '@/lib/config/models'
import { usageTracker } from '@/lib/services/usage-tracker'
import { createServerSupabaseClient } from '@/lib/supabase'
// OpenRouter credentials and endpoint. The key comes from the server
// environment; when it is missing, POST responds with a 500 before any
// upstream call is made.
const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY
const OPENROUTER_URL = 'https://openrouter.ai/api/v1/chat/completions'
/** Request payload accepted by the AI text-edit endpoint (POST body). */
interface AIEditRequest {
  // Surrounding document context for the selection being edited; the text is
  // interpolated verbatim into the prompt sent to the model.
  context: {
    preceding: string // text immediately before the selection
    selected: string // the text to edit (required)
    following: string // text immediately after the selection
    formatting?: string // formatting label of the selection (e.g. list item, heading) — added to the prompt when present
  }
  instruction: string // the user's edit instruction (required)
  model?: string // model key; defaults to DEFAULT_EDIT_MODEL
  userId?: string // when present, monthly limits are checked and usage is recorded
  stream?: boolean // when true, the edit is streamed back as server-sent events
}
// Default model key for text editing (fast and efficient). This is the
// app-level key; it is translated to an OpenRouter model id via
// getOpenRouterModel() before the upstream request.
const DEFAULT_EDIT_MODEL = 'gpt-4o-mini'
export async function POST(request: NextRequest) {
try {
const body: AIEditRequest = await request.json()
const { context, instruction, model = DEFAULT_EDIT_MODEL, userId, stream = false } = body
// Validate input
if (!context.selected || !instruction) {
return NextResponse.json(
{ error: 'Selected text and instruction are required' },
{ status: 400 }
)
}
if (!OPENROUTER_API_KEY) {
return NextResponse.json(
{ error: 'OpenRouter API key not configured' },
{ status: 500 }
)
}
// Check usage limits if userId is provided
if (userId) {
try {
const supabase = createServerSupabaseClient()
const { data: subscription } = await supabase
.from('subscriptions')
.select('price_id, status')
.eq('user_id', userId)
.eq('status', 'active')
.order('created_at', { ascending: false })
.limit(1)
.single()
const { PRICING_TIERS } = await import('@/lib/stripe')
let smartPromptsLimit = PRICING_TIERS.FREE.maxSmartPrompts
let fastPromptsLimit = PRICING_TIERS.FREE.hasUnlimitedFastPrompts ? 999999 : PRICING_TIERS.FREE.maxFastPrompts
if (subscription?.price_id) {
const { getPlanByPriceId } = await import('@/lib/stripe')
const plan = getPlanByPriceId(subscription.price_id)
if (plan) {
smartPromptsLimit = plan.maxSmartPrompts
fastPromptsLimit = plan.hasUnlimitedFastPrompts ? 999999 : plan.maxFastPrompts
}
}
const limitCheckResult = await usageTracker.checkRequestLimits(
userId,
model,
smartPromptsLimit,
fastPromptsLimit
)
if (!limitCheckResult.canProceed) {
const modelTier = getModelTier(model)
const tierDisplay = modelTier === 'smart' ? '🧠 Smart AI' : '⚡ Fast AI'
return NextResponse.json({
error: `You've reached your monthly limit of ${limitCheckResult.limit} ${tierDisplay} requests.`,
limitExceeded: true
}, { status: 429 })
}
} catch (error) {
console.error('Error checking usage limits:', error)
// Continue with request if limit check fails
}
}
// Build context-aware prompt
const systemPrompt = `You are a helpful text editor that maintains context and coherence. Your task is to edit the selected text according to the user's instruction while keeping it consistent with the surrounding context.
IMPORTANT RULES:
1. Only edit the selected text - do not modify the preceding or following text
2. Maintain the same tone and style as the surrounding context
3. Keep the edit coherent with what comes before and after
4. Return ONLY the edited text, nothing else
5. Do not add explanations, quotes, or additional formatting
6. Preserve the original meaning unless explicitly asked to change it
7. CRITICAL: Maintain the exact formatting structure (headings, lists, blockquotes, etc.) of the selected text`
const formattingInstruction = context.formatting
? `\n\nFORMATTING CONTEXT: The selected text is currently formatted as "${context.formatting}". You MUST maintain this exact formatting structure in your response:
- If it's a bullet list item, return ONLY the text content (no bullets, no dashes, no numbers)
- If it's a numbered list item, return ONLY the text content (no bullets, no dashes, no numbers)
- If it's a heading level X, return content suitable for that heading level
- If it's within a blockquote, maintain the blockquote style
- If it's a code block, maintain code formatting
- If it's a task list item, return ONLY the text content (no checkboxes, no bullets)
- If it's a table cell, return content suitable for a table cell (concise, structured)
- If it's a paragraph, return well-structured paragraph content
CRITICAL: Do not include list markers (-, *, 1., etc.) in your response when editing list items. The formatting structure is already provided by the editor.`
: ''
const userPrompt = `Context:
Preceding text: "${context.preceding}"
Selected text: "${context.selected}"
Following text: "${context.following}"
${context.formatting ? `Current formatting: ${context.formatting}` : ''}
Instruction: ${instruction}${formattingInstruction}
Please edit only the selected text while maintaining coherence with the surrounding context and preserving the formatting structure.`
const openRouterModel = getOpenRouterModel(model)
const requestStartTime = new Date()
// If streaming is requested, return a streaming response
if (stream) {
const encoder = new TextEncoder()
const streamingResponse = new ReadableStream({
async start(controller) {
try {
// Make API call to OpenRouter
const response = await fetch(OPENROUTER_URL, {
method: 'POST',
headers: {
'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
'Content-Type': 'application/json',
'HTTP-Referer': process.env.NEXT_PUBLIC_SITE_URL || 'http://localhost:3000',
'X-Title': 'Bookwiz Text Editor'
},
body: JSON.stringify({
model: openRouterModel,
messages: [
{
role: 'system',
content: systemPrompt
},
{
role: 'user',
content: userPrompt
}
],
max_tokens: 1000,
temperature: 0.7,
stream: true
})
})
if (!response.ok) {
const errorData = await response.text()
console.error('OpenRouter API error:', errorData)
let errorMessage = 'Failed to process AI edit request'
if (response.status === 429) {
errorMessage = 'AI model is rate limited. Please try again in a moment.'
} else if (response.status === 401) {
errorMessage = 'API authentication failed. Please check configuration.'
} else if (response.status >= 500) {
errorMessage = 'AI service is temporarily unavailable. Please try again.'
}
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: errorMessage })}\n\n`))
controller.close()
return
}
const reader = response.body?.getReader()
if (!reader) {
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: 'No response body' })}\n\n`))
controller.close()
return
}
const decoder = new TextDecoder()
let buffer = ''
let fullContent = ''
let totalTokens = 0
let promptTokens = 0
let completionTokens = 0
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
buffer += chunk
const lines = buffer.split('\n')
buffer = lines.pop() || ''
for (const line of lines) {
if (line.startsWith('data: ')) {
const data = line.slice(6).trim()
if (!data || data === '[DONE]') continue
try {
const parsed = JSON.parse(data)
const delta = parsed.choices?.[0]?.delta
// Handle content streaming
if (delta?.content) {
fullContent += delta.content
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ content: delta.content })}\n\n`))
}
// Capture usage statistics
if (parsed.usage) {
totalTokens = parsed.usage.total_tokens || 0
promptTokens = parsed.usage.prompt_tokens || 0
completionTokens = parsed.usage.completion_tokens || 0
}
} catch (e) {
// Skip invalid JSON
}
}
}
}
// Track usage if userId is provided
if (userId && fullContent) {
try {
const getModelProvider = (modelName: string): string => {
if (modelName.includes('openai') || modelName.includes('gpt')) return 'openai'
if (modelName.includes('anthropic') || modelName.includes('claude')) return 'anthropic'
if (modelName.includes('google') || modelName.includes('gemini')) return 'google'
if (modelName.includes('meta') || modelName.includes('llama')) return 'meta'
if (modelName.includes('mistral')) return 'mistral'
return 'openrouter'
}
await usageTracker.recordUsage({
user_id: userId,
model_name: model,
model_provider: getModelProvider(openRouterModel),
prompt_tokens: promptTokens || 0,
completion_tokens: completionTokens || fullContent.split(' ').length,
total_tokens: totalTokens || 0,
request_type: 'text_edit',
success: true
})
} catch (error) {
console.error('Error tracking usage:', error)
}
}
// Send completion signal
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
controller.close()
} catch (error) {
console.error('AI edit streaming error:', error)
controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: 'Failed to process AI edit request' })}\n\n`))
controller.close()
}
}
})
return new Response(streamingResponse, {
headers: {
'Content-Type': 'text/event-stream',
'Cache-Control': 'no-cache',
'Connection': 'keep-alive',
},
})
}
// Non-streaming response (existing logic)
// Make API call to OpenRouter
const response = await fetch(OPENROUTER_URL, {
method: 'POST',
headers: {
'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
'Content-Type': 'application/json',
'HTTP-Referer': process.env.NEXT_PUBLIC_SITE_URL || 'http://localhost:3000',
'X-Title': 'Bookwiz Text Editor'
},
body: JSON.stringify({
model: openRouterModel,
messages: [
{
role: 'system',
content: systemPrompt
},
{
role: 'user',
content: userPrompt
}
],
max_tokens: 1000,
temperature: 0.7,
})
})
if (!response.ok) {
const errorData = await response.text()
console.error('OpenRouter API error:', errorData)
let errorMessage = 'Failed to process AI edit request'
if (response.status === 429) {
errorMessage = 'AI model is rate limited. Please try again in a moment.'
} else if (response.status === 401) {
errorMessage = 'API authentication failed. Please check configuration.'
} else if (response.status >= 500) {
errorMessage = 'AI service is temporarily unavailable. Please try again.'
}
return NextResponse.json({ error: errorMessage }, { status: response.status })
}
const data = await response.json()
const editedText = data.choices?.[0]?.message?.content?.trim()
if (!editedText) {
return NextResponse.json(
{ error: 'No response received from AI model' },
{ status: 500 }
)
}
// Track usage if userId is provided
if (userId) {
try {
const getModelProvider = (modelName: string): string => {
if (modelName.includes('openai') || modelName.includes('gpt')) return 'openai'
if (modelName.includes('anthropic') || modelName.includes('claude')) return 'anthropic'
if (modelName.includes('google') || modelName.includes('gemini')) return 'google'
if (modelName.includes('meta') || modelName.includes('llama')) return 'meta'
if (modelName.includes('mistral')) return 'mistral'
return 'openrouter'
}
await usageTracker.recordUsage({
user_id: userId,
model_name: model,
model_provider: getModelProvider(openRouterModel),
prompt_tokens: data.usage?.prompt_tokens || 0,
completion_tokens: data.usage?.completion_tokens || editedText.split(' ').length,
total_tokens: data.usage?.total_tokens || 0,
request_type: 'text_edit',
success: true
})
} catch (error) {
console.error('Error tracking usage:', error)
}
}
return NextResponse.json({
editedText,
model: model,
usage: data.usage
})
} catch (error) {
console.error('AI edit error:', error)
return NextResponse.json(
{ error: 'Failed to process AI edit request' },
{ status: 500 }
)
}
}