LLMZone Docs

Test Script

A complete test script with error handling and JSON output

Full Test Script

test.py
import requests
import json
import time

# Configuration
API_KEY = "YOUR_API_KEY"  # placeholder — replace with your real LLMZone API key
BASE_URL = "https://api.llmzone.net/v1"  # LLMZone API root (OpenAI-compatible v1 endpoints)

def run_chat_example():
    """Send one chat-completion request to the LLMZone API and print the result.

    Reads the module-level ``API_KEY`` and ``BASE_URL``. Prints the HTTP
    status code, then the pretty-printed JSON body on success (HTTP 200)
    or the raw error text otherwise. Network and HTTP-level failures are
    caught and reported rather than raised.

    Returns:
        None. All output goes to stdout.
    """
    # No placeholders here, so a plain string is enough (was a needless f-string).
    print("\n--- Testing Model: claude-opus-4-6 ---")
    url = f"{BASE_URL}/chat/completions"
    headers = {
        "Authorization": f"Bearer {API_KEY}",
        "Content-Type": "application/json"
    }

    payload = {
        "model": "claude-opus-4-6",
        "messages": [
            {"role": "user", "content": "Hello! Are you working?"}
        ]
    }

    try:
        # `requests` has no default timeout — without one the script can hang
        # forever if the API is unreachable.
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        print(f"Status Code: {response.status_code}")
        if response.status_code == 200:
            print("Response Body:")
            print(json.dumps(response.json(), indent=2))
        else:
            print(f"Error: {response.text}")
    except requests.exceptions.RequestException as e:
        # Catch only connection/HTTP errors; programming errors should surface.
        print(f"Failed to connect: {e}")

if __name__ == "__main__":
    # NOTE(review): purpose of this delay is not stated — presumably to avoid
    # hitting a rate limit when the script is run repeatedly; confirm or remove.
    time.sleep(2)
    run_chat_example()

Expected Output

--- Testing Model: claude-opus-4-6 ---
Status Code: 200
Response Body:
{
  "choices": [
    {
      "finish_reason": "stop",
      "message": {
        "content": "Hello! Yes, I'm working and ready to help.",
        "role": "assistant"
      }
    }
  ],
  "usage": {
    "completion_tokens": 22,
    "prompt_tokens": 13,
    "total_tokens": 35
  },
  "model": "claude-opus-4-6"
}

On this page