{"x402Version":2,"service":{"name":"LumenJoule Compute","description":"Pay-per-request AI inference API. OpenAI-compatible chat completions with 7 models. Accepts USDC on Base and LumenJoule on Stellar via x402 protocol. No API keys required.","url":"https://compute.lumenbro.com","documentation":"https://compute.lumenbro.com/docs"},"resources":[{"resource":"https://compute.lumenbro.com/api/v1/chat/completions","type":"http","method":"POST","description":"AI chat completions (OpenAI-compatible). Supports 7 models from Llama, Mistral, DeepSeek, and Qwen families.","mimeType":"application/json","accepts":[{"scheme":"exact","network":"eip155:8453","asset":"0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913","payTo":"0xA956311362f68Bd5291CED9C697998714bEEDA07","maxTimeoutSeconds":60,"description":"USDC on Base"},{"scheme":"exact","network":"stellar:pubnet","asset":"CBVWPBYEDJ7GYIUHL2HITMEEWM75WAMFINIQCR4ZAFZ62ISDFBVERQCX","payTo":"GBQG67XV2VEKRYZBGT5LZSBOHVVVX7CLTCO7WCGQAA4R2SV2BCJW2VP2","description":"LumenJoule on Stellar"}],"models":[{"id":"meta-llama/Llama-3.3-70B-Instruct","displayName":"Llama 3.3 70B","tier":"medium","maxOutputTokens":4096},{"id":"meta-llama/Llama-3.2-3B-Instruct","displayName":"Llama 3.2 3B","tier":"small","maxOutputTokens":4096},{"id":"meta-llama/Llama-4-Scout-17B-16E-Instruct","displayName":"Llama 4 Scout 17B","tier":"medium","maxOutputTokens":4096},{"id":"mistralai/Mistral-Small-24B-Instruct-2501","displayName":"Mistral Small 24B","tier":"medium","maxOutputTokens":4096},{"id":"deepseek-ai/DeepSeek-V3","displayName":"DeepSeek V3","tier":"large","maxOutputTokens":8192},{"id":"Qwen/Qwen2.5-72B-Instruct","displayName":"Qwen 2.5 72B","tier":"medium","maxOutputTokens":4096},{"id":"deepseek-ai/DeepSeek-R1","displayName":"DeepSeek R1","tier":"reasoning","maxOutputTokens":8192}],"inputSchema":{"type":"object","properties":{"model":{"type":"string","enum":["meta-llama/Llama-3.3-70B-Instruct","meta-llama/Llama-3.2-3B-Instruct","meta-llama/Llama-4-Scout-17B-16E-Instruct","mistralai/Mistral-Small-24B-Instruct-2501","deepseek-ai/DeepSeek-V3","Qwen/Qwen2.5-72B-Instruct","deepseek-ai/DeepSeek-R1"],"description":"Model ID for inference"},"messages":{"type":"array","items":{"type":"object","properties":{"role":{"type":"string","enum":["system","user","assistant"]},"content":{"type":"string"}},"required":["role","content"]},"minItems":1,"description":"Chat messages in OpenAI format"},"max_tokens":{"type":"number","description":"Maximum output tokens"},"temperature":{"type":"number","description":"Sampling temperature 0-2"},"stream":{"type":"boolean","description":"Enable SSE streaming"}},"required":["model","messages"]},"outputExample":{"id":"chatcmpl-abc123","object":"chat.completion","choices":[{"index":0,"message":{"role":"assistant","content":"Hello! How can I help?"},"finish_reason":"stop"}],"usage":{"prompt_tokens":10,"completion_tokens":8,"total_tokens":18}}},{"resource":"https://compute.lumenbro.com/api/models","type":"http","method":"GET","description":"List available models and their capabilities. Free endpoint — no payment required.","mimeType":"application/json"},{"resource":"https://compute.lumenbro.com/api/health","type":"http","method":"GET","description":"Health check endpoint. Returns service status and supported networks.","mimeType":"application/json"}]}