Best Practices

Optimize your integration for production use

Production-ready patterns for The Resume Parser API.

Error Handling

Implement Retries

/**
 * Create a parse request and poll for its result, retrying transient
 * failures with exponential backoff.
 *
 * @param {*} input - Payload forwarded to createParseRequest.
 * @param {number} [maxRetries=3] - Total attempts before giving up.
 * @returns {Promise<*>} The completed parse result.
 * @throws Rethrows non-retryable client errors immediately, or the
 *   last error seen once all attempts are exhausted.
 */
async function parseWithRetry(input, maxRetries = 3) {
  let lastError

  for (let i = 0; i < maxRetries; i++) {
    try {
      const response = await createParseRequest(input)
      return await pollForResults(response.request_id)
    } catch (error) {
      lastError = error

      // Don't retry client errors — except 429 (rate limit), which is
      // transient and safe to retry after backing off
      if (error.status >= 400 && error.status < 500 && error.status !== 429) {
        throw error
      }

      // Exponential backoff: 1s, 2s, 4s, ...
      if (i < maxRetries - 1) {
        await sleep(2 ** i * 1000)
      }
    }
  }

  throw lastError
}

Handle Specific Errors

// Branch on the API's error_code so each failure mode gets a
// targeted recovery path; unknown codes fall through to logging.
try {
  const result = await parseResume(file)
} catch (error) {
  const code = error.error_code
  if (code === 'INSUFFICIENT_CREDITS') {
    // Redirect to billing
  } else if (code === 'FILE_TOO_LARGE') {
    // Show error to user
  } else if (code === 'RATE_LIMIT_EXCEEDED') {
    // Queue for later
  } else {
    // Log and alert
  }
}

Polling for Results

Efficient Polling

/**
 * Poll the parse endpoint until the request completes, fails, or the
 * attempt budget runs out.
 *
 * @param {string} requestId - ID returned by the create-request call.
 * @param {number} [maxAttempts=30] - Polls before declaring a timeout.
 * @returns {Promise<*>} The completed result object.
 * @throws {Error} When the parse fails server-side or times out.
 */
async function pollForResults(requestId, maxAttempts = 30) {
  let attempt = 0

  while (attempt < maxAttempts) {
    const result = await getParseResult(requestId)

    switch (result.status) {
      case 'completed':
        return result
      case 'failed':
        throw new Error(result.error)
    }

    // Progressive delay between polls: 1s, 2s, 3s, capped at 5s
    attempt += 1
    await sleep(Math.min(attempt, 5) * 1000)
  }

  throw new Error('Parse timeout')
}

Credit Management

Monitor Balance

// Check before processing batch: bail out early if the remaining
// credits can't cover the estimated cost of the work.
const usage = await getUsage()

const remaining = usage.credit_balance.total
if (remaining < estimatedCost) {
  // Alert user or pause processing
  console.warn('Low credits:', remaining)
}

Track Consumption

// Log per-request cost by sampling the usage endpoint before and after
const before = await getUsage()
const result = await parseResume(file)
const after = await getUsage()

// getUsage() nests the balance under credit_balance, so compare
// credit_balance.total on both snapshots (there is no top-level .total)
console.log('Credits used:', before.credit_balance.total - after.credit_balance.total)

Caching

Cache Parsed Results

// Avoid re-parsing the same resume
const cacheKey = hashFile(file)
const cached = await cache.get(cacheKey)

if (cached) {
  return cached
}

const result = await parseResume(file)
await cache.set(cacheKey, result, 86400) // 24h TTL
return result

Rate Limiting

Client-Side Throttling

import pLimit from 'p-limit'

// Cap the number of in-flight requests at 10
const limit = pLimit(10)

const tasks = resumes.map((resume) => limit(() => parseResume(resume)))
const results = await Promise.all(tasks)

Batch Processing

// Process in chunks
/**
 * Parse resumes in sequential chunks, pausing between chunks to stay
 * under rate limits.
 *
 * @param {Array<*>} resumes - Inputs accepted by parseResume.
 * @param {number} [batchSize=50] - Resumes parsed concurrently per chunk.
 * @returns {Promise<Array<*>>} Results in the same order as the input.
 */
async function processBatch(resumes, batchSize = 50) {
  const results = []

  for (let i = 0; i < resumes.length; i += batchSize) {
    const batch = resumes.slice(i, i + batchSize)
    const batchResults = await Promise.all(
      batch.map((r) => parseResume(r))
    )
    results.push(...batchResults)

    // Brief pause between batches — skipped after the final batch so
    // callers don't pay an extra second for nothing
    if (i + batchSize < resumes.length) {
      await sleep(1000)
    }
  }

  return results
}

Security

Protect API Keys

// Read the key from the environment rather than hard-coding it
const API_KEY = process.env.RESUME_PARSER_API_KEY

// Never log API keys — mask everything past a short prefix
console.log('Using key:', `${API_KEY.slice(0, 10)}...`)

Validate Files

// Reject oversized files before wasting an upload
const MAX_SIZE = 10 * 1024 * 1024 // 10MB

if (file.size > MAX_SIZE) {
  throw new Error('File too large')
}

// Accept only PDF and DOCX uploads
const ALLOWED_TYPES = [
  'application/pdf',
  'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
]

if (!ALLOWED_TYPES.includes(file.type)) {
  throw new Error('Invalid file type')
}

Performance

Parallel Processing

// Fan out: start every parse concurrently, then wait for all of them
const parseTasks = resumes.map((resume) => parseResume(resume))
const results = await Promise.all(parseTasks)

Optimize Model Selection

// Use Nano for simple resumes
/**
 * Pick the cheapest model that can handle the resume's length.
 *
 * @param {*} file - File whose page count drives the choice.
 * @returns {string} 'nano' (<=2 pages), 'standard' (<=5), else 'premium'.
 */
function selectModel(file) {
  const pages = getPageCount(file)

  if (pages > 5) return 'premium'
  return pages > 2 ? 'standard' : 'nano'
}

Monitoring

Track Metrics

// Emit one structured log line per completed request
const { request_id, duration_ms, credits_consumed, model_used } = result
console.log({
  timestamp: Date.now(),
  requestId: request_id,
  duration: duration_ms,
  creditsUsed: credits_consumed,
  model: model_used,
})

Set Alerts

Monitor for:

  • High error rates (>5%)
  • Low credit balance (<10%)
  • Slow response times (>60s P95)
  • Failed parses