// app/api/standalone-chat/route.ts
import { NextRequest, NextResponse } from 'next/server'
import { DEFAULT_MODEL, getModelTier, getOpenRouterModel } from '@/lib/config/models'
import { usageTracker } from '@/lib/services/usage-tracker'
import { createServerSupabaseClient } from '@/lib/supabase'

const OPENROUTER_API_KEY = process.env.OPENROUTER_API_KEY
const OPENROUTER_URL = 'https://openrouter.ai/api/v1/chat/completions'
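
// Shape of the JSON body this handler expects, inferred from the destructuring
// and message mapping below (a sketch, not a shared type definition):
//
//   {
//     messages: Array<{ type: 'user' | 'ai'; content: string }>,
//     model?: string,   // defaults to DEFAULT_MODEL.name
//     userId: string,
//     chatId?: string   // omit to create a new standalone chat
//   }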

export async function POST(req: NextRequest) {
  try {
    const { messages: initialUserMessages, model = DEFAULT_MODEL.name, userId, chatId } = await req.json()

    if (!initialUserMessages || initialUserMessages.length === 0) {
      return NextResponse.json({ error: 'No messages provided' }, { status: 400 })
    }

    if (!userId) {
      return NextResponse.json({ error: 'User ID required' }, { status: 400 })
    }

    // Check usage limits
    try {
      const supabase = createServerSupabaseClient()
      
      const { data: subscription } = await supabase
        .from('subscriptions')
        .select('price_id, status')
        .eq('user_id', userId)
        .eq('status', 'active')
        .order('created_at', { ascending: false })
        .limit(1)
        .single()

      const { PRICING_TIERS } = await import('@/lib/stripe')
      let smartPromptsLimit = PRICING_TIERS.FREE.maxSmartPrompts
      let fastPromptsLimit = PRICING_TIERS.FREE.hasUnlimitedFastPrompts ? 999999 : PRICING_TIERS.FREE.maxFastPrompts

      if (subscription?.price_id) {
        const { getPlanByPriceId } = await import('@/lib/stripe')
        const plan = getPlanByPriceId(subscription.price_id)

        if (plan) {
          smartPromptsLimit = plan.maxSmartPrompts
          fastPromptsLimit = plan.hasUnlimitedFastPrompts ? 999999 : plan.maxFastPrompts
        }
      }

      const limitCheckResult = await usageTracker.checkRequestLimits(
        userId,
        model,
        smartPromptsLimit,
        fastPromptsLimit
      )

      if (!limitCheckResult.canProceed) {
        const modelTier = getModelTier(model)
        const tierDisplay = modelTier === 'smart' ? '🧠 Smart AI' : '⚡ Fast AI'

        return NextResponse.json({
          error: `You've reached your monthly limit of ${limitCheckResult.limit} ${tierDisplay} requests.`,
          limitExceeded: true
        }, { status: 429 })
      }
    } catch (error) {
      // Fail open: if the limit check itself errors, log it and let the request proceed
      console.error('Error checking usage limits:', error)
    }

    if (req.signal.aborted) {
      return NextResponse.json({ error: 'Request cancelled' }, { status: 499 })
    }

    const supabase = createServerSupabaseClient()
    const lastUserMessage = initialUserMessages[initialUserMessages.length - 1]

    // Create or update chat if needed
    let currentChatId = chatId
    if (!currentChatId) {
      const { data: newChat, error: chatError } = await supabase
        .from('chats')
        .insert({
          user_id: userId,
          chat_type: 'standalone',
          title: 'New Chat',
          model: model,
          book_id: null
        })
        .select()
        .single()

      if (chatError) {
        console.error('Error creating chat:', chatError)
        return NextResponse.json({ error: 'Failed to create chat' }, { status: 500 })
      }
      currentChatId = newChat.id
    }

    // Save user message
    await supabase
      .from('messages')
      .insert({
        chat_id: currentChatId,
        type: 'user',
        content: lastUserMessage.content,
        sequence_number: initialUserMessages.length
      })

    // Create streaming response
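    // Events emitted to the client over this stream (see the enqueue calls below):
    //   data: {"type":"content","content":"..."}   one per streamed delta
    //   data: {"type":"done","chatId":"..."}       after the reply is saved
    //   data: {"type":"error","error":"..."}       on failure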
    const stream = new ReadableStream({
      async start(controller) {
        let fullResponse = ''
        let promptTokens = 0
        let completionTokens = 0
        let totalTokens = 0

        try {
          if (!OPENROUTER_API_KEY) {
            throw new Error('OpenRouter API key not configured')
          }

          // Prepare messages for OpenRouter
          const conversationMessages = initialUserMessages.map((msg: any) => ({
            role: msg.type === 'user' ? 'user' : 'assistant',
            content: msg.content
          }))

          // Add system message for standalone chat
          conversationMessages.unshift({
            role: 'system',
            content: `You are a helpful AI assistant. You can help with various tasks including writing, brainstorming, answering questions, creative projects, and general conversation. Be conversational, helpful, and engaging.`
          })

          const openRouterModel = getOpenRouterModel(model)

          // Make API call to OpenRouter
          const response = await fetch(OPENROUTER_URL, {
            method: 'POST',
            headers: {
              'Authorization': `Bearer ${OPENROUTER_API_KEY}`,
              'Content-Type': 'application/json',
              'HTTP-Referer': process.env.NEXT_PUBLIC_SITE_URL || 'http://localhost:3000',
              'X-Title': 'Bookwiz'
            },
            body: JSON.stringify({
              model: openRouterModel,
              messages: conversationMessages,
              stream: true,
              temperature: 0.7,
              max_tokens: 2000
            }),
            signal: req.signal
          })

          if (!response.ok) {
            const errorData = await response.text()
            console.error('OpenRouter API error:', errorData)
            
            let errorMessage = 'Failed to get response from AI model'
            if (response.status === 429) {
              errorMessage = 'AI model is rate limited. Please try again in a moment.'
            } else if (response.status === 401) {
              errorMessage = 'API authentication failed. Please check configuration.'
            } else if (response.status >= 500) {
              errorMessage = 'AI service is temporarily unavailable. Please try again.'
            }
            
            throw new Error(errorMessage)
          }

          const reader = response.body?.getReader()
          if (!reader) {
            throw new Error('No response body')
          }
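
          // OpenRouter relays OpenAI-style SSE: each event arrives as a line of the
          // form `data: {...json...}`, with `data: [DONE]` marking the end of the
          // stream. The parsing below assumes that format.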

          const decoder = new TextDecoder()
          let buffer = ''

          while (true) {
            const { done, value } = await reader.read()
            if (done) break

            // Stop reading if the client aborted the request
            if (req.signal.aborted) {
              break
            }

            // Buffer the decoded text so SSE events split across chunk boundaries still parse
            buffer += decoder.decode(value, { stream: true })
            const lines = buffer.split('\n')
            buffer = lines.pop() ?? ''

            for (const line of lines) {
              if (line.startsWith('data: ')) {
                const data = line.slice(6).trim()
                if (data === '[DONE]') continue
                
                try {
                  const parsed = JSON.parse(data)
                  
                  // Handle different response formats from OpenRouter
                  if (parsed.choices && parsed.choices[0]) {
                    const choice = parsed.choices[0]
                    
                    if (choice.delta?.content) {
                      const content = choice.delta.content
                      fullResponse += content
                      
                      // Send content chunk to client
                      controller.enqueue(new TextEncoder().encode(`data: ${JSON.stringify({
                        type: 'content',
                        content: content
                      })}\n\n`))
                    }
                    
                    // Extract usage information if available
                    if (parsed.usage) {
                      promptTokens = parsed.usage.prompt_tokens || 0
                      completionTokens = parsed.usage.completion_tokens || 0
                      totalTokens = parsed.usage.total_tokens || 0
                    }
                  }
                } catch (e) {
                  // Skip invalid JSON
                  console.log('Skipping invalid JSON:', data)
                }
              }
            }
          }

          // Save AI response to database
          await supabase
            .from('messages')
            .insert({
              chat_id: currentChatId,
              type: 'ai',
              content: fullResponse,
              model: model,
              sequence_number: initialUserMessages.length + 1
            })

          // Update chat title if new
          if (!chatId && fullResponse) {
            const chatTitle = fullResponse.length > 50 
              ? fullResponse.substring(0, 47) + '...' 
              : fullResponse || 'New Chat'
            
            await supabase
              .from('chats')
              .update({ title: chatTitle })
              .eq('id', currentChatId)
          }

          // Track usage
          if (userId) {
            try {
              const getModelProvider = (modelName: string): string => {
                if (modelName.includes('openai') || modelName.includes('gpt')) return 'openai'
                if (modelName.includes('anthropic') || modelName.includes('claude')) return 'anthropic'
                if (modelName.includes('google') || modelName.includes('gemini')) return 'google'
                if (modelName.includes('meta') || modelName.includes('llama')) return 'meta'
                if (modelName.includes('mistral')) return 'mistral'
                return 'openrouter'
              }

              await usageTracker.recordUsage({
                user_id: userId,
                model_name: model,
                model_provider: getModelProvider(openRouterModel),
                prompt_tokens: promptTokens,
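                // Fall back to a rough word-count estimate if the stream never reported usage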
                completion_tokens: completionTokens || fullResponse.split(' ').length,
                total_tokens: totalTokens || (promptTokens + (completionTokens || fullResponse.split(' ').length)),
                request_type: 'chat',
                success: true
              })
            } catch (error) {
              console.error('Error tracking usage:', error)
            }
          }

          // Send completion signal
          controller.enqueue(new TextEncoder().encode(`data: ${JSON.stringify({
            type: 'done',
            chatId: currentChatId
          })}\n\n`))

        } catch (error: any) {
          console.error('Error in streaming response:', error)
          
          // Track failed usage
          if (userId) {
            try {
              await usageTracker.recordUsage({
                user_id: userId,
                model_name: model,
                model_provider: 'openrouter',
                success: false,
                error_message: error.message,
                request_type: 'chat'
              })
            } catch (trackingError) {
              console.error('Error tracking failed usage:', trackingError)
            }
          }

          controller.enqueue(new TextEncoder().encode(`data: ${JSON.stringify({
            type: 'error',
            error: error instanceof Error ? error.message : 'Unknown error'
          })}\n\n`))
        } finally {
          controller.close()
        }
      }
    })

    return new Response(stream, {
      headers: {
        'Content-Type': 'text/event-stream',
        'Cache-Control': 'no-cache',
        'Connection': 'keep-alive',
      },
    })

  } catch (error) {
    console.error('Error in standalone chat API:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
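
// Example of consuming the stream from a browser client (a sketch only; the fetch
// options and event parsing mirror what this route emits, the endpoint path is
// assumed from this file's location, and the appendToUi/rememberChatId/showError
// helpers are hypothetical):
//
//   const res = await fetch('/api/standalone-chat', {
//     method: 'POST',
//     headers: { 'Content-Type': 'application/json' },
//     body: JSON.stringify({ messages, model, userId, chatId })
//   })
//   const reader = res.body!.getReader()
//   const decoder = new TextDecoder()
//   let buffer = ''
//   while (true) {
//     const { done, value } = await reader.read()
//     if (done) break
//     buffer += decoder.decode(value, { stream: true })
//     const lines = buffer.split('\n')
//     buffer = lines.pop() ?? ''
//     for (const line of lines) {
//       if (!line.startsWith('data: ')) continue
//       const event = JSON.parse(line.slice(6))
//       if (event.type === 'content') appendToUi(event.content)
//       if (event.type === 'done') rememberChatId(event.chatId)
//       if (event.type === 'error') showError(event.error)
//     }
//   }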

// GET endpoint to fetch standalone chats
export async function GET(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url)
    const userId = searchParams.get('userId')
    
    if (!userId) {
      return NextResponse.json({ error: 'User ID required' }, { status: 400 })
    }

    const supabase = createServerSupabaseClient()

    const { data: chats, error } = await supabase
      .from('chats')
      .select('*')
      .eq('user_id', userId)
      .eq('chat_type', 'standalone')
      .order('updated_at', { ascending: false })

    if (error) {
      console.error('Error fetching standalone chats:', error)
      return NextResponse.json({ error: 'Failed to fetch chats' }, { status: 500 })
    }

    return NextResponse.json({ chats })

  } catch (error) {
    console.error('Error in standalone chat GET:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}

// DELETE endpoint
export async function DELETE(req: NextRequest) {
  try {
    const { searchParams } = new URL(req.url)
    const chatId = searchParams.get('chatId')
    const userId = searchParams.get('userId')
    
    if (!chatId || !userId) {
      return NextResponse.json({ error: 'Chat ID and User ID required' }, { status: 400 })
    }

    const supabase = createServerSupabaseClient()

    const { error } = await supabase
      .from('chats')
      .delete()
      .eq('id', chatId)
      .eq('user_id', userId)
      .eq('chat_type', 'standalone')

    if (error) {
      console.error('Error deleting standalone chat:', error)
      return NextResponse.json({ error: 'Failed to delete chat' }, { status: 500 })
    }

    return NextResponse.json({ success: true })

  } catch (error) {
    console.error('Error in standalone chat DELETE:', error)
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}