import { NextRequest, NextResponse } from 'next/server'
import { createClient, type User } from '@supabase/supabase-js'
import { usageTracker } from '@/lib/services/usage-tracker'
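// Required environment variables: OPENAI_API_KEY, NEXT_PUBLIC_SUPABASE_URL, NEXT_PUBLIC_SUPABASE_ANON_KEY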
const OPENAI_API_KEY = process.env.OPENAI_API_KEY
const OPENAI_URL = 'https://api.openai.com/v1/images/generations'
// Create a server-side Supabase client that forwards the caller's Authorization header
// so queries run on behalf of the authenticated user
function createServerSupabaseClient(request: Request) {
const authHeader = request.headers.get('authorization')
return createClient(
process.env.NEXT_PUBLIC_SUPABASE_URL!,
process.env.NEXT_PUBLIC_SUPABASE_ANON_KEY!,
{
auth: {
autoRefreshToken: false,
persistSession: false
},
global: {
headers: authHeader ? {
Authorization: authHeader
} : {}
}
}
)
}
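// POST handler: authenticates the caller, generates an image with gpt-image-1,
// saves it to the generated_images table, and records usage for tracking.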
export async function POST(req: NextRequest) {
let user: User | null = null
try {
const supabase = createServerSupabaseClient(req)
// Get current user
const { data: { user: currentUser }, error: authError } = await supabase.auth.getUser()
user = currentUser
if (authError || !user) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
// Parse the JSON body, rejecting malformed payloads with a 400 instead of a generic 500
let body: any
try {
body = await req.json()
} catch {
return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 })
}
const { prompt, size = '1024x1024', quality = 'auto', bookId } = body ?? {}
if (!prompt || typeof prompt !== 'string') {
return NextResponse.json({ error: 'Prompt is required' }, { status: 400 })
}
if (!OPENAI_API_KEY) {
return NextResponse.json({ error: 'OpenAI API key not configured' }, { status: 500 })
}
// Validate parameters for GPT Image 1
const validSizes = ['1024x1024', '1536x1024', '1024x1536']
const validQualities = ['low', 'medium', 'high', 'auto']
if (!validSizes.includes(size)) {
return NextResponse.json({ error: 'Invalid size parameter' }, { status: 400 })
}
if (!validQualities.includes(quality)) {
return NextResponse.json({ error: 'Invalid quality parameter' }, { status: 400 })
}
// If bookId is provided, verify user has access to the book
if (bookId) {
const { data: book } = await supabase
.from('books')
.select('id')
.eq('id', bookId)
.eq('user_id', user.id)
.single()
if (!book) {
return NextResponse.json({ error: 'Book not found or access denied' }, { status: 404 })
}
}
// Call the OpenAI Images API using the gpt-image-1 model
const openaiResponse = await fetch(OPENAI_URL, {
method: 'POST',
headers: {
'Authorization': `Bearer ${OPENAI_API_KEY}`,
'Content-Type': 'application/json'
},
body: JSON.stringify({
model: 'gpt-image-1',
prompt,
n: 1,
size,
quality
})
})
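// OpenAI rejected the request: record the failed attempt and map it to a client-facing error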
if (!openaiResponse.ok) {
const errorData = await openaiResponse.json().catch(() => ({}))
// Track failed usage
try {
await usageTracker.recordUsage({
user_id: user.id,
book_id: bookId || null,
model_name: 'gpt-image-1',
model_provider: 'openai',
request_type: 'image_generation',
success: false,
error_message: errorData.error?.message || 'OpenAI API error'
})
} catch (error) {
console.error('Failed to track failed image generation usage:', error)
}
if (openaiResponse.status === 400 && errorData.error?.code === 'content_policy_violation') {
return NextResponse.json({
error: 'Image generation failed due to content policy. Please modify your prompt and try again.'
}, { status: 400 })
}
return NextResponse.json({
error: 'Image generation failed. Please try again.'
}, { status: 500 })
}
const openaiData = await openaiResponse.json()
// Guard against an empty data array so a malformed response yields a clean error below
const generatedImage = openaiData.data?.[0] ?? {}
// Handle different response formats - GPT Image 1 returns base64 by default
let imageUrl = generatedImage.url
if (!imageUrl && generatedImage.b64_json) {
// Convert base64 to data URL for display
imageUrl = `data:image/png;base64,${generatedImage.b64_json}`
}
if (!imageUrl) {
return NextResponse.json({
error: 'No image data returned from GPT Image 1'
}, { status: 500 })
}
// Approximate cost: gpt-image-1 billing is usage-based, so these flat per-image
// figures are rough estimates rather than exact charges
const costUsd = quality === 'high' ? 0.080 : 0.040 // high: ~$0.080, others: ~$0.040
// Store the generated image in our database
const { data: savedImage, error: saveError } = await supabase
.from('generated_images')
.insert({
user_id: user.id,
book_id: bookId || null,
prompt,
revised_prompt: generatedImage.revised_prompt || '',
image_url: imageUrl,
model: 'gpt-image-1',
size,
quality,
style: null, // GPT Image 1 doesn't use style parameter
cost_usd: costUsd,
metadata: {
// Full API response minus the base64 payload, to keep the stored row small
openai_response: { ...openaiData, data: openaiData.data?.map((img: any) => ({ ...img, b64_json: undefined })) }
}
})
.select()
.single()
// Track usage for image generation as 1 smart prompt
try {
await usageTracker.recordUsage({
user_id: user.id,
book_id: bookId || null,
model_name: 'gpt-image-1',
model_provider: 'openai',
request_type: 'image_generation',
success: true,
cost_usd: costUsd
})
} catch (error) {
console.error('Failed to track image generation usage:', error)
// Don't fail the request if usage tracking fails
}
if (saveError) {
console.error('Failed to save generated image:', saveError)
// Still return the image even if we couldn't save it to our database
return NextResponse.json({
id: null,
prompt,
revised_prompt: generatedImage.revised_prompt || '',
image_url: imageUrl,
size,
quality,
style: null,
created_at: new Date().toISOString()
})
}
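// Success: return the persisted record, including its database id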
return NextResponse.json({
id: savedImage.id,
prompt: savedImage.prompt,
revised_prompt: savedImage.revised_prompt,
image_url: savedImage.image_url,
size: savedImage.size,
quality: savedImage.quality,
style: savedImage.style,
created_at: savedImage.created_at
})
} catch (error) {
console.error('Unexpected error during image generation:', error)
// Track failed usage for unexpected errors
if (user?.id) {
try {
await usageTracker.recordUsage({
user_id: user.id,
model_name: 'gpt-image-1',
model_provider: 'openai',
request_type: 'image_generation',
success: false,
error_message: error instanceof Error ? error.message : 'Unknown error'
})
} catch (trackingError) {
console.error('Failed to track failed image generation usage:', trackingError)
}
}
return NextResponse.json({
error: 'An unexpected error occurred during image generation'
}, { status: 500 })
}
}
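/*
 * Example client-side call (sketch only). The route path '/api/images/generate' and the
 * session-retrieval step are assumptions; adjust them to this app's actual route location
 * and Supabase client setup.
 *
 *   const { data: { session } } = await supabase.auth.getSession()
 *   const res = await fetch('/api/images/generate', {
 *     method: 'POST',
 *     headers: {
 *       'Content-Type': 'application/json',
 *       Authorization: `Bearer ${session?.access_token}`
 *     },
 *     body: JSON.stringify({ prompt: 'A watercolor fox in a forest', quality: 'medium' })
 *   })
 *   const image = await res.json() // { id, prompt, revised_prompt, image_url, size, quality, ... }
 */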