Chat Completion (LLM)
Basic Usage
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="cerebras",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="openai/gpt-oss-120b",
    messages=[
        {
            "role": "user",
            "content": "What is the capital of France?"
        }
    ],
)

print(completion.choices[0].message)
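The same call also supports streamed responses. Below is a minimal sketch, assuming the same model and provider as above: passing stream=True makes the client yield incremental chunks instead of one completion object.

import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="cerebras",
    api_key=os.environ["HF_TOKEN"],
)

# stream=True yields chunks as the model generates tokens
stream = client.chat.completions.create(
    model="openai/gpt-oss-120b",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    stream=True,
)

for chunk in stream:
    # Each chunk carries an incremental delta of the assistant message;
    # the final chunk may have no content, hence the `or ""`
    print(chunk.choices[0].delta.content or "", end="")
print()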
cURL Example
curl https://router.huggingface.co/v1/chat/completions \
    -H "Authorization: Bearer $HF_TOKEN" \
    -H 'Content-Type: application/json' \
    -d '{
        "messages": [
            {
                "role": "user",
                "content": "What is the capital of France?"
            }
        ],
        "model": "openai/gpt-oss-120b:cerebras",
        "stream": false
    }'
Using HF-Inferoxy Token Management
from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def cerebras_chat_completion(proxy_api_key: str):
    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="cerebras",
        api_key=token
    )

    try:
        # Make chat completion request
        completion = client.chat.completions.create(
            model="openai/gpt-oss-120b",
            messages=[
                {
                    "role": "user",
                    "content": "What is the capital of France?"
                }
            ],
        )

        # Report success
        report_token_status(token_id, "success", api_key=proxy_api_key)
        print(completion.choices[0].message)
        return completion

    except HfHubHTTPError as e:
        # Report the error
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise
    except Exception as e:
        # Report generic error
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    cerebras_chat_completion(proxy_api_key)
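The acquire-token, make-call, report-status pattern above generalizes to any request. As a sketch, the hypothetical helper below (built only from the hf_token_utils functions already shown) factors it into a reusable wrapper:

from huggingface_hub import InferenceClient
from hf_token_utils import get_proxy_token, report_token_status

def with_managed_token(proxy_api_key: str, fn):
    # Hypothetical helper: acquire a managed token, run fn(client),
    # and report the outcome back to the proxy either way
    token, token_id = get_proxy_token(api_key=proxy_api_key)
    client = InferenceClient(provider="cerebras", api_key=token)
    try:
        result = fn(client)
        report_token_status(token_id, "success", api_key=proxy_api_key)
        return result
    except Exception as e:
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

# Usage: any chat completion becomes a one-liner
completion = with_managed_token(
    "your_proxy_api_key_here",
    lambda client: client.chat.completions.create(
        model="openai/gpt-oss-120b",
        messages=[{"role": "user", "content": "What is the capital of France?"}],
    ),
)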
Chat Completion (VLM)
Basic Usage
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="cerebras",
    api_key=os.environ["HF_TOKEN"],
)

completion = client.chat.completions.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": "Describe this image in one sentence."
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                    }
                }
            ]
        }
    ],
)

print(completion.choices[0].message)
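If the image lives on disk rather than at a URL, the same image_url content type should also accept a base64 data URI, a common convention in OpenAI-compatible APIs. A minimal sketch (the file path is a placeholder):

import base64
import os
from huggingface_hub import InferenceClient

client = InferenceClient(
    provider="cerebras",
    api_key=os.environ["HF_TOKEN"],
)

# Encode a local file as a data URI (path is a placeholder)
with open("statue_of_liberty.jpg", "rb") as f:
    b64 = base64.b64encode(f.read()).decode()

completion = client.chat.completions.create(
    model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "text", "text": "Describe this image in one sentence."},
                {
                    "type": "image_url",
                    "image_url": {"url": f"data:image/jpeg;base64,{b64}"},
                },
            ],
        }
    ],
)

print(completion.choices[0].message)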
cURL Example
curl https://router.huggingface.co/v1/chat/completions \
    -H "Authorization: Bearer $HF_TOKEN" \
    -H 'Content-Type: application/json' \
    -d '{
        "messages": [
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": "Describe this image in one sentence."
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                        }
                    }
                ]
            }
        ],
        "model": "meta-llama/Llama-4-Scout-17B-16E-Instruct:cerebras",
        "stream": false
    }'
Using HF-Inferoxy Token Management
from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def cerebras_vlm_completion(proxy_api_key: str):
    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="cerebras",
        api_key=token
    )

    try:
        # Make VLM completion request
        completion = client.chat.completions.create(
            model="meta-llama/Llama-4-Scout-17B-16E-Instruct",
            messages=[
                {
                    "role": "user",
                    "content": [
                        {
                            "type": "text",
                            "text": "Describe this image in one sentence."
                        },
                        {
                            "type": "image_url",
                            "image_url": {
                                "url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
                            }
                        }
                    ]
                }
            ],
        )

        # Report success
        report_token_status(token_id, "success", api_key=proxy_api_key)
        print(completion.choices[0].message)
        return completion

    except HfHubHTTPError as e:
        # Report the error
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise
    except Exception as e:
        # Report generic error
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    cerebras_vlm_completion(proxy_api_key)
⚠️ Important: Authentication Required
All client operations now require authentication with the HF-Inferoxy server. This is part of the Role-Based Access Control (RBAC) system that provides secure access to the proxy services.
Getting Your API Key
- Default Admin User: The system creates a default admin user on first run. Check your server logs or the users.json file for the default admin credentials.
- Create a User Account: Use the admin account to create a regular user account (a Python equivalent is sketched after this list):

  curl -X POST "http://localhost:8000/admin/users" \
      -H "Authorization: Bearer ADMIN_API_KEY" \
      -H "Content-Type: application/json" \
      -d '{"username": "youruser", "email": "user@example.com", "full_name": "Your Name", "role": "user"}'

- Use the Generated API Key: The response will include an API key that you’ll use in all client operations.
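As a sketch, here is a Python equivalent of the curl call above, assuming the same /admin/users endpoint on a local proxy at port 8000:

import requests

# Mirrors the curl command above; ADMIN_API_KEY and the user
# details are placeholders to replace with your own values
resp = requests.post(
    "http://localhost:8000/admin/users",
    headers={"Authorization": "Bearer ADMIN_API_KEY"},
    json={
        "username": "youruser",
        "email": "user@example.com",
        "full_name": "Your Name",
        "role": "user",
    },
)
resp.raise_for_status()
print(resp.json())  # Response should include the generated API key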
For detailed RBAC setup and user management, see RBAC_README.md.