Common errors, debugging strategies, and solutions for the In Midst My Life platform.
Run this first to diagnose system health:
#!/bin/bash
# save as: scripts/health-check.sh
#
# Quick health diagnostics for the local dev stack: container status,
# API liveness/readiness, PostgreSQL, Redis, and recent error logs.
# Safe to run repeatedly; makes no changes to any service.

echo "🔍 Running system diagnostics..."

# NOTE: bash `echo "\n..."` prints a literal backslash-n; use printf
# for portable newline handling.
printf '\n1. Checking services...\n'
docker-compose ps

printf '\n2. Testing API health...\n'
curl -s http://localhost:3001/health || echo "❌ API unreachable"
curl -s http://localhost:3001/ready || echo "❌ API not ready"

printf '\n3. Testing database connection...\n'
docker-compose exec -T postgres pg_isready -U midstsvc || echo "❌ PostgreSQL not ready"

printf '\n4. Testing Redis...\n'
docker-compose exec -T redis redis-cli ping || echo "❌ Redis not responding"

printf '\n5. Checking logs for errors...\n'
# grep exits 1 when no matches are found — that means "no errors",
# so don't let it look like a script failure.
docker-compose logs --tail=50 api | grep -i error || true
docker-compose logs --tail=50 orchestrator | grep -i error || true

printf '\n✅ Diagnostics complete\n'
# View all container status
docker-compose ps
# Check service logs
docker-compose logs -f api
docker-compose logs -f orchestrator
docker-compose logs --tail=100 api
# Restart specific service
docker-compose restart api
# Full restart
docker-compose down && docker-compose up -d
# Check port usage
lsof -i :3001
lsof -i :5432
ECONNREFUSED - Cannot Connect to Database
Symptoms:
Error: connect ECONNREFUSED 127.0.0.1:5432
Diagnosis:
# Check if PostgreSQL is running
docker-compose ps postgres
# Check PostgreSQL logs
docker-compose logs postgres
Solutions:
./scripts/dev-up.sh
# or
docker-compose up postgres -d
# Check environment variable
echo $DATABASE_URL
# Should be:
postgresql://midstsvc:password@localhost:5432/midst_dev
# For Docker services:
postgresql://midstsvc:password@postgres:5432/midst
# Check what's using port 5432
lsof -i :5432
# Change port in .env
POSTGRES_PORT=5433
# Update DATABASE_URL accordingly
# Verify network
docker network ls
docker network inspect in-midst-my-life_default
# Recreate network
docker-compose down
docker-compose up -d
relation "profiles" does not exist
Symptoms:
ERROR: relation "profiles" does not exist
Diagnosis:
# Connect to database
./scripts/dev-shell.sh postgres
# In psql:
\dt # List all tables
Solution:
# Run migrations
pnpm --filter @in-midst-my-life/api migrate
pnpm --filter @in-midst-my-life/orchestrator migrate
# Verify tables exist
./scripts/dev-shell.sh postgres
# In psql:
\dt
SELECT COUNT(*) FROM profiles;
Symptoms:
Migration failed: duplicate column name
Diagnosis:
# Check migration status
./scripts/dev-shell.sh postgres
# In psql:
SELECT * FROM migrations ORDER BY applied_at DESC;
Solutions:
# Rollback (if DOWN statements exist)
# Edit migration file with proper DOWN
# Re-run migration
pnpm --filter @in-midst-my-life/api migrate
# DANGER: Nuclear option (dev only)
docker-compose down -v # Removes volumes
docker-compose up postgres -d
pnpm --filter @in-midst-my-life/api migrate
pnpm --filter @in-midst-my-life/api seed
too many connections
Symptoms:
FATAL: sorry, too many clients already
Solutions:
./scripts/dev-shell.sh postgres
# In psql:
SELECT COUNT(*) FROM pg_stat_activity;
# Kill idle connections
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE state = 'idle'
AND query_start < NOW() - INTERVAL '5 minutes';
# docker-compose.yml
postgres:
command: postgres -c max_connections=200
Redis connection failed
Symptoms:
Error: Redis connection to redis:6379 failed
Diagnosis:
# Check Redis status
docker-compose ps redis
# Test connection
docker-compose exec redis redis-cli ping
Solutions:
./scripts/dev-up.sh
# or
docker-compose up redis -d
echo $REDIS_URL
# Should be:
redis://redis:6379 # From Docker containers
redis://localhost:6379 # From host machine
# If Redis has password
redis://:<password>@redis:6379
Symptoms:
Solutions:
./scripts/dev-shell.sh redis
# In redis-cli:
FLUSHDB # Clear current database
FLUSHALL # Clear all databases
./scripts/dev-shell.sh redis
# In redis-cli:
KEYS taxonomy:* # Find taxonomy keys
DEL taxonomy:masks taxonomy:epochs taxonomy:stages
# Set in .env
REDIS_URL= # Empty = use in-memory fallback
401 Unauthorized
Symptoms:
{
"error": {
"code": "UNAUTHORIZED",
"message": "Missing or invalid authentication token"
}
}
Solutions:
# Include JWT token
curl -H "Authorization: Bearer <your-jwt-token>" \
http://localhost:3001/profiles
404 Not Found
Symptoms:
{
"error": {
"code": "NOT_FOUND",
"message": "Profile not found"
}
}
Diagnosis:
# Check if resource exists in database
./scripts/dev-shell.sh postgres
# In psql:
SELECT * FROM profiles WHERE id = '<uuid>';
Solutions:
SELECT * FROM profiles WHERE id = '<uuid>' AND is_active = true;
429 Too Many Requests / QUOTA_EXCEEDED
Symptoms:
{
"error": {
"code": "QUOTA_EXCEEDED",
"message": "Monthly quota exceeded for this feature",
"details": {
"feature": "resume_tailoring",
"limit": 5,
"used": 5,
"resetDate": "2025-02-01T00:00:00Z"
}
}
}
Solutions:
Wait for the quota to reset (see resetDate in the error response), or:
# Set env var (dev only)
DISABLE_RATE_LIMITING=true
500 Internal Server Error
Symptoms:
{
"error": {
"code": "INTERNAL_ERROR",
"message": "An unexpected error occurred"
}
}
Diagnosis:
# Check API logs
docker-compose logs api | tail -100
# Look for stack traces
docker-compose logs api | grep -A 20 "Error:"
Solutions:
docker-compose restart api
Symptoms:
Tasks remain stuck in pending status
Diagnosis:
# Check orchestrator is running
docker-compose ps orchestrator
# Check worker is enabled
docker-compose exec orchestrator printenv | grep ORCH_WORKER_ENABLED
# Check Redis queue length
./scripts/dev-shell.sh redis
# In redis-cli:
LLEN "bull:task-queue:waiting"
LLEN "bull:task-queue:active"
LLEN "bull:task-queue:failed"
Solutions:
# Set in .env
ORCH_WORKER_ENABLED=true
# Restart orchestrator
docker-compose restart orchestrator
# Check REDIS_URL or ORCH_REDIS_URL
echo $ORCH_REDIS_URL
# Should be:
redis://redis:6379
# Check orchestrator logs
docker-compose logs orchestrator | grep "Registering handler"
# Ensure task type matches registered handlers
# Check failed queue
./scripts/dev-shell.sh redis
# In redis-cli:
LRANGE "bull:task-queue:failed" 0 -1
Symptoms:
Error: LLM request timeout after 30s
Solutions:
# Check Ollama is running
curl http://localhost:11434/api/tags
# Start Ollama
ollama serve
# Pull model if needed
ollama pull llama3.1:8b
# For local development
LOCAL_LLM_URL=http://localhost:11434
# For Docker containers
LOCAL_LLM_URL=http://host.docker.internal:11434
LOCAL_LLM_ALLOWED_HOSTS=host.docker.internal
# Set in .env
ORCH_AGENT_EXECUTOR=stub
hydration failed in Next.js
Symptoms:
Error: Hydration failed because the initial UI does not match what was rendered on the server.
Solutions:
'use client';
import dynamic from 'next/dynamic';
const ClientComponent = dynamic(() => import('./ClientComponent'), {
ssr: false,
});
// Use consistent formatting
const date = new Date(dateString).toISOString();
Symptoms:
Failed to fetch: net::ERR_CONNECTION_REFUSED
Diagnosis:
# Check API is running
curl http://localhost:3001/health
# Check NEXT_PUBLIC_API_BASE_URL
echo $NEXT_PUBLIC_API_BASE_URL
Solutions:
pnpm --filter @in-midst-my-life/api dev
# Set in .env
NEXT_PUBLIC_API_BASE_URL=http://localhost:3001
Symptoms:
Cannot read property 'append' of null
Solutions:
const containerRef = useRef<HTMLDivElement>(null);
useEffect(() => {
if (!containerRef.current) return;
// ... D3 code
}, []);
return <div ref={containerRef} />;
const D3Graph = dynamic(() => import('./D3Graph'), {
ssr: false,
});
# Set in .env
NEXT_PUBLIC_GRAPH_LAYOUT=radial
pnpm install fails
Symptoms:
ERR_PNPM_LOCKFILE_BROKEN_NODE_MODULES
Solutions:
# Clean install
rm -rf node_modules
rm pnpm-lock.yaml
pnpm install
# If still failing, clear pnpm cache
pnpm store prune
pnpm install
Symptoms:
Type 'X' is not assignable to type 'Y'
Solutions:
# Rebuild all packages
pnpm build
# Clear TypeScript cache
rm tsconfig.tsbuildinfo
rm -rf apps/*/tsconfig.tsbuildinfo
rm -rf packages/*/tsconfig.tsbuildinfo
# Re-run typecheck
pnpm typecheck
Symptoms:
Error: listen EADDRINUSE: address already in use :::3001
Solutions:
# Find process using port
lsof -i :3001
# Kill process
kill -9 <PID>
# Or change port
# In .env:
API_PORT=3011
Diagnosis:
# Check pod status
kubectl get pods -n inmidst
# View pod logs
kubectl logs <pod-name> -n inmidst
# Describe pod for events
kubectl describe pod <pod-name> -n inmidst
Common Causes:
# Check pod environment
kubectl exec <pod-name> -n inmidst -- printenv
# Check image exists
docker pull <image-name>:<tag>
# Check image pull secrets
kubectl get secrets -n inmidst
Diagnosis:
# Check Helm release status
helm status inmidst -n inmidst
# View release history
helm history inmidst -n inmidst
# Get error details
helm get notes inmidst -n inmidst
Solutions:
helm rollback inmidst -n inmidst
helm uninstall inmidst -n inmidst
helm install inmidst . -n inmidst -f values.yaml
helm install inmidst . --dry-run --debug -n inmidst
Diagnosis:
# Check API metrics
curl http://localhost:3001/metrics | grep http_request_duration
# Test specific endpoint
time curl http://localhost:3001/profiles/<id>
Solutions:
# Enable query logging
# In postgresql.conf:
log_statement = 'all'
log_duration = on
# Check slow queries
SELECT * FROM pg_stat_statements
ORDER BY total_exec_time DESC  -- use total_time on PostgreSQL 12 and earlier
LIMIT 10;
CREATE INDEX idx_profiles_slug ON profiles(slug);
CREATE INDEX idx_experiences_profile_id ON experiences(profile_id);
# Ensure REDIS_URL is set
echo $REDIS_URL
# Test Redis connection
./scripts/dev-shell.sh redis
Diagnosis:
# Check memory usage
docker stats
# Kubernetes
kubectl top pods -n inmidst
Solutions:
# docker-compose.yml
api:
deploy:
resources:
limits:
memory: 2G
// Reduce pool size
const pool = new Pool({
max: 5, // instead of 10
});
Symptoms:
Error: Unexpected token in JSON at position 0
Solutions:
# Switch to smaller model
LOCAL_LLM_MODEL=gemma3:4b
# Use text mode
ORCH_LLM_RESPONSE_FORMAT=text
Symptoms:
Error: Tool 'rg' not in allowlist
Solutions:
# Enable tool in allowlist
ORCH_TOOL_ALLOWLIST=rg,ls,cat
# Or disable tools entirely
ORCH_TOOL_ALLOWLIST= # Empty = no tools
# API
LOG_LEVEL=debug pnpm --filter @in-midst-my-life/api dev
# Orchestrator
LOG_LEVEL=debug pnpm --filter @in-midst-my-life/orchestrator dev
# Docker Compose
docker-compose up --verbose
# Create bug report bundle
./scripts/collect-diagnostics.sh > diagnostics.txt
# Includes:
# - Service status
# - Recent logs
# - Environment config (secrets redacted)
# - Database table counts
# - Redis stats
When reporting issues, include: