model value. See the Models Catalog for all available options.
cURL
# POST an OpenAI-compatible chat-completion request to the gateway.
# Auth uses a bearer key (lw_sk_ prefix); the body is standard
# Chat Completions JSON: model id, message list, and a token cap.
curl https://api.lightweight.one/v1/chat/completions \
-H "Authorization: Bearer lw_sk_your-key-here" \
-H "Content-Type: application/json" \
-d '{
"model": "gpt-5.4",
"messages": [{"role": "user", "content": "Hello, how are you?"}],
"max_tokens": 200
}'
Python
pip install openai
from openai import OpenAI

# Point the official OpenAI SDK at the Lightweight gateway: only the
# API key and base URL differ from a stock OpenAI setup.
lw = OpenAI(
    api_key="lw_sk_your-key-here",
    base_url="https://api.lightweight.one/v1",
)

# Standard chat-completion call: a system prompt plus one user turn.
completion = lw.chat.completions.create(
    model="gpt-5.4",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "What is the capital of France?"},
    ],
)

# The assistant's reply is the message on the first (only) choice.
print(completion.choices[0].message.content)
Node.js / TypeScript
npm install openai
import OpenAI from "openai";

// Route the official OpenAI SDK through the Lightweight gateway:
// only the key and baseURL differ from a stock OpenAI setup.
const lw = new OpenAI({
  apiKey: "lw_sk_your-key-here",
  baseURL: "https://api.lightweight.one/v1",
});

async function main() {
  // Standard chat-completion call: system prompt plus one user turn.
  const completion = await lw.chat.completions.create({
    model: "gpt-5.4",
    messages: [
      { role: "system", content: "You are a helpful assistant." },
      { role: "user", content: "What is the capital of France?" },
    ],
  });

  // The assistant's reply is the message on the first (only) choice.
  console.log(completion.choices[0].message.content);
}

main();
Streaming (Python)
from openai import OpenAI

# Point the official OpenAI SDK at the Lightweight gateway.
client = OpenAI(
    api_key="lw_sk_your-key-here",
    base_url="https://api.lightweight.one/v1",
)

# stream=True returns an iterator of delta chunks instead of one response.
stream = client.chat.completions.create(
    model="claude-sonnet-4.5",
    messages=[{"role": "user", "content": "Write a haiku about coding"}],
    stream=True,
)

for chunk in stream:
    # Guard against chunks with an empty `choices` list (e.g. a final
    # usage chunk) before indexing — mirrors the `?.` guard used in the
    # Node.js streaming example.
    if chunk.choices and chunk.choices[0].delta.content:
        # flush=True so each token appears immediately instead of
        # sitting in the stdout buffer until the stream ends.
        print(chunk.choices[0].delta.content, end="", flush=True)
Streaming (Node.js)
import OpenAI from "openai";

// Route the official OpenAI SDK through the Lightweight gateway.
const lw = new OpenAI({
  apiKey: "lw_sk_your-key-here",
  baseURL: "https://api.lightweight.one/v1",
});

// stream: true yields an async iterable of delta chunks.
const stream = await lw.chat.completions.create({
  model: "claude-sonnet-4.5",
  messages: [{ role: "user", content: "Write a haiku about coding" }],
  stream: true,
});

for await (const chunk of stream) {
  // Optional chaining guards chunks that carry no choices/delta.
  const token = chunk.choices[0]?.delta?.content;
  if (token) {
    process.stdout.write(token);
  }
}
Using Different Models
Swap the `model` parameter to use any provider:
# Each call below is identical except for the `model` value; `client`
# and `messages` are assumed to be set up as in the earlier examples.
# OpenAI
response = client.chat.completions.create(model="gpt-5.4", messages=messages)
# Anthropic
response = client.chat.completions.create(model="claude-sonnet-4.5", messages=messages)
# Google
response = client.chat.completions.create(model="gemini-2.5-pro", messages=messages)
# xAI
response = client.chat.completions.create(model="grok-code-fast-1", messages=messages)