// app/api/ai/generate-text/route.ts
import { NextRequest, NextResponse } from 'next/server'
import { getOpenRouterModel, getModelTier } from '@/lib/config/models'
import { usageTracker } from '@/lib/services/usage-tracker'
import { createServerSupabaseClient } from '@/lib/supabase'

const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY
const OPENROUTER_URL = 'https://openrouter.ai/api/v1/chat/completions'

interface GenerateTextRequest {
  prompt: string
  model?: string
  userId?: string
  stream?: boolean
}
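
/*
 * POST /api/ai/generate-text
 *
 * Request/response contract (derived from the handler below).
 *
 * Example request body (shape defined by GenerateTextRequest above):
 *   { "prompt": "Write an opening scene", "model": "gpt-4o-mini", "stream": true }
 *
 * Non-streaming calls return JSON: { generatedText, model, usage }.
 * Streaming calls return Server-Sent Events, where each frame is one of:
 *   data: {"content":"...next tokens..."}
 *   data: {"error":"...message..."}
 *   data: {"done":true}
 */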

// Default model for text generation (fast and efficient)
const DEFAULT_GENERATE_MODEL = 'gpt-4o-mini'

// Infer the upstream provider from an OpenRouter model identifier
// (used only for usage tracking; defaults to 'openrouter' when unknown)
const getModelProvider = (modelName: string): string => {
  if (modelName.includes('openai') || modelName.includes('gpt')) return 'openai'
  if (modelName.includes('anthropic') || modelName.includes('claude')) return 'anthropic'
  if (modelName.includes('google') || modelName.includes('gemini')) return 'google'
  if (modelName.includes('meta') || modelName.includes('llama')) return 'meta'
  if (modelName.includes('mistral')) return 'mistral'
  return 'openrouter'
}

export async function POST(request: NextRequest) {
  try {
    const body: GenerateTextRequest = await request.json()
    const { prompt, model = DEFAULT_GENERATE_MODEL, userId, stream = false } = body

    // Validate input
    if (!prompt) {
      return NextResponse.json(
        { error: 'Prompt is required' },
        { status: 400 }
      )
    }

    if (!OPENROUTER_API_KEY) {
      return NextResponse.json(
        { error: 'OpenRouter API key not configured' },
        { status: 500 }
      )
    }

    // Check usage limits if userId is provided
    if (userId) {
      try {
        const supabase = createServerSupabaseClient()
        
        const { data: subscription } = await supabase
          .from('subscriptions')
          .select('price_id, status')
          .eq('user_id', userId)
          .eq('status', 'active')
          .order('created_at', { ascending: false })
          .limit(1)
          .maybeSingle() // avoids an error result when the user has no active subscription

        // Resolve prompt limits from the user's plan, defaulting to the free tier;
        // "unlimited" fast prompts are modeled with a large sentinel value
        const { PRICING_TIERS, getPlanByPriceId } = await import('@/lib/stripe')
        let smartPromptsLimit = PRICING_TIERS.FREE.maxSmartPrompts
        let fastPromptsLimit = PRICING_TIERS.FREE.hasUnlimitedFastPrompts ? 999999 : PRICING_TIERS.FREE.maxFastPrompts

        if (subscription?.price_id) {
          const plan = getPlanByPriceId(subscription.price_id)

          if (plan) {
            smartPromptsLimit = plan.maxSmartPrompts
            fastPromptsLimit = plan.hasUnlimitedFastPrompts ? 999999 : plan.maxFastPrompts
          }
        }

        const limitCheckResult = await usageTracker.checkRequestLimits(
          userId,
          model,
          smartPromptsLimit,
          fastPromptsLimit
        )

        if (!limitCheckResult.canProceed) {
          const modelTier = getModelTier(model)
          const tierDisplay = modelTier === 'smart' ? '🧠 Smart AI' : '⚡ Fast AI'
          
          return NextResponse.json({ 
            error: `You've reached your monthly limit of ${limitCheckResult.limit} ${tierDisplay} requests.`,
            limitExceeded: true
          }, { status: 429 })
        }
      } catch (error) {
        console.error('Error checking usage limits:', error)
        // Fail open: let the request proceed if the limit check itself errors
      }
    }

    // Build generation prompt
    const systemPrompt = `You are a helpful AI writing assistant. Generate creative, engaging, and informative content based on the user's request.

IMPORTANT RULES:
1. Write content that is relevant and useful
2. Be creative but maintain coherence
3. Use appropriate tone and style for the context
4. Keep responses concise but informative
5. Avoid repetitive or generic content
6. Write in a natural, flowing style`

    const openRouterModel = getOpenRouterModel(model)

    // If streaming is requested, return a streaming response
    if (stream) {
      const encoder = new TextEncoder()
      
      const streamingResponse = new ReadableStream({
        async start(controller) {
          try {
            // Make API call to OpenRouter
            const response = await fetch(OPENROUTER_URL, {
              method: 'POST',
              headers: {
                'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
                'Content-Type': 'application/json',
                'HTTP-Referer': process.env.NEXT_PUBLIC_SITE_URL || 'http://localhost:3000',
                'X-Title': 'Bookwiz Text Generator'
              },
              body: JSON.stringify({
                model: openRouterModel,
                messages: [
                  {
                    role: 'system',
                    content: systemPrompt
                  },
                  {
                    role: 'user',
                    content: prompt
                  }
                ],
                max_tokens: 1000,
                temperature: 0.8,
                stream: true
              })
            })

            if (!response.ok) {
              const errorData = await response.text()
              console.error('OpenRouter API error:', errorData)
              
              let errorMessage = 'Failed to generate content'
              if (response.status === 429) {
                errorMessage = 'AI model is rate limited. Please try again in a moment.'
              } else if (response.status === 401) {
                errorMessage = 'API authentication failed. Please check configuration.'
              } else if (response.status >= 500) {
                errorMessage = 'AI service is temporarily unavailable. Please try again.'
              }
              
              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: errorMessage })}\n\n`))
              controller.close()
              return
            }

            const reader = response.body?.getReader()
            if (!reader) {
              controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: 'No response body' })}\n\n`))
              controller.close()
              return
            }

            const decoder = new TextDecoder()
            let buffer = ''
            let fullContent = ''
            let totalTokens = 0
            let promptTokens = 0
            let completionTokens = 0

            while (true) {
              const { done, value } = await reader.read()
              if (done) break

              const chunk = decoder.decode(value, { stream: true })
              buffer += chunk

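              // SSE frames are newline-delimited; keep any trailing partial
              // line in the buffer until the next chunk completes it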
              const lines = buffer.split('\n')
              buffer = lines.pop() || ''

              for (const line of lines) {
                if (line.startsWith('data: ')) {
                  const data = line.slice(6).trim()
                  if (!data || data === '[DONE]') continue

                  try {
                    const parsed = JSON.parse(data)
                    const delta = parsed.choices?.[0]?.delta

                    // Handle content streaming
                    if (delta?.content) {
                      fullContent += delta.content
                      controller.enqueue(encoder.encode(`data: ${JSON.stringify({ content: delta.content })}\n\n`))
                    }

                    // Capture usage statistics
                    if (parsed.usage) {
                      totalTokens = parsed.usage.total_tokens || 0
                      promptTokens = parsed.usage.prompt_tokens || 0
                      completionTokens = parsed.usage.completion_tokens || 0
                    }
                  } catch (e) {
                    // Skip invalid JSON
                  }
                }
              }
            }

            // Track usage if userId is provided
            if (userId && fullContent) {
              try {
                await usageTracker.recordUsage({
                  user_id: userId,
                  model_name: model,
                  model_provider: getModelProvider(openRouterModel),
                  prompt_tokens: promptTokens || 0,
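                  // Fall back to a rough word count when the stream omitted usage data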
                  completion_tokens: completionTokens || fullContent.split(' ').length,
                  total_tokens: totalTokens || 0,
                  request_type: 'text_generation',
                  success: true
                })
              } catch (error) {
                console.error('Error tracking usage:', error)
              }
            }

            // Send completion signal
            controller.enqueue(encoder.encode(`data: ${JSON.stringify({ done: true })}\n\n`))
            controller.close()

          } catch (error) {
            console.error('AI generation streaming error:', error)
            controller.enqueue(encoder.encode(`data: ${JSON.stringify({ error: 'Failed to generate content' })}\n\n`))
            controller.close()
          }
        }
      })

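      // Return the stream as Server-Sent Events so the client can render tokens as they arrive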
      return new Response(streamingResponse, {
        headers: {
          'Content-Type': 'text/event-stream',
          'Cache-Control': 'no-cache',
          'Connection': 'keep-alive',
        },
      })
    }

    // Non-streaming response: make a single call to OpenRouter and return JSON
    const response = await fetch(OPENROUTER_URL, {
      method: 'POST',
      headers: {
        'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
        'Content-Type': 'application/json',
        'HTTP-Referer': process.env.NEXT_PUBLIC_SITE_URL || 'http://localhost:3000',
        'X-Title': 'Bookwiz Text Generator'
      },
      body: JSON.stringify({
        model: openRouterModel,
        messages: [
          {
            role: 'system',
            content: systemPrompt
          },
          {
            role: 'user',
            content: prompt
          }
        ],
        max_tokens: 1000,
        temperature: 0.8,
      })
    })

    if (!response.ok) {
      const errorData = await response.text()
      console.error('OpenRouter API error:', errorData)
      
      let errorMessage = 'Failed to generate content'
      if (response.status === 429) {
        errorMessage = 'AI model is rate limited. Please try again in a moment.'
      } else if (response.status === 401) {
        errorMessage = 'API authentication failed. Please check configuration.'
      } else if (response.status >= 500) {
        errorMessage = 'AI service is temporarily unavailable. Please try again.'
      }
      
      return NextResponse.json({ error: errorMessage }, { status: response.status })
    }

    const data = await response.json()
    const generatedText = data.choices?.[0]?.message?.content?.trim()

    if (!generatedText) {
      return NextResponse.json(
        { error: 'No content generated from AI model' },
        { status: 500 }
      )
    }

    // Track usage if userId is provided
    if (userId) {
      try {
        await usageTracker.recordUsage({
          user_id: userId,
          model_name: model,
          model_provider: getModelProvider(openRouterModel),
          prompt_tokens: data.usage?.prompt_tokens || 0,
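          // Fall back to a rough word count when the API omits usage data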
          completion_tokens: data.usage?.completion_tokens || generatedText.split(' ').length,
          total_tokens: data.usage?.total_tokens || 0,
          request_type: 'text_generation',
          success: true
        })
      } catch (error) {
        console.error('Error tracking usage:', error)
      }
    }

    return NextResponse.json({ 
      generatedText,
      model: model,
      usage: data.usage
    })

  } catch (error) {
    console.error('AI generation error:', error)
    return NextResponse.json(
      { error: 'Failed to generate content' },
      { status: 500 }
    )
  }
}
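
/*
 * Sketch of client-side consumption of the streaming variant. This is a
 * hypothetical example, not part of this route; onToken and onError are
 * placeholder callbacks.
 *
 *   const res = await fetch('/api/ai/generate-text', {
 *     method: 'POST',
 *     headers: { 'Content-Type': 'application/json' },
 *     body: JSON.stringify({ prompt: 'Describe a storm at sea', stream: true })
 *   })
 *   const reader = res.body!.getReader()
 *   const decoder = new TextDecoder()
 *   let buffer = ''
 *   while (true) {
 *     const { done, value } = await reader.read()
 *     if (done) break
 *     buffer += decoder.decode(value, { stream: true })
 *     const lines = buffer.split('\n')
 *     buffer = lines.pop() || ''
 *     for (const line of lines) {
 *       if (!line.startsWith('data: ')) continue
 *       const event = JSON.parse(line.slice(6))
 *       if (event.content) onToken(event.content)
 *       if (event.error) onError(event.error)
 *     }
 *   }
 */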