Automatic Speech Recognition

Basic Usage

import os
from huggingface_hub import InferenceClient

# Route requests through the fal-ai provider; authentication uses the
# HF_TOKEN environment variable.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ["HF_TOKEN"],
)

# Transcribe a local audio file with Whisper large-v3.
output = client.automatic_speech_recognition("sample1.flac", model="openai/whisper-large-v3")

cURL Example

curl https://router.huggingface.co/fal-ai/fal-ai/whisper \
    -X POST \
    -H "Authorization: Bearer $HF_TOKEN" \
    -H 'Content-Type: audio/flac' \
    --data-binary @"sample1.flac"

With HF-Inferoxy Token Management

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def fal_ai_speech_recognition(audio_file: str, model: str = "openai/whisper-large-v3", proxy_api_key: str = None):
    """Transcribe an audio file via the fal-ai provider with a managed token.

    Obtains a token from the HF-Inferoxy proxy, runs automatic speech
    recognition, and reports the token's health back to the proxy.

    Args:
        audio_file: Path to the audio file to transcribe.
        model: Model id to use for recognition.
        proxy_api_key: HF-Inferoxy API key used to obtain and report the token.

    Returns:
        The transcription output from ``InferenceClient.automatic_speech_recognition``.

    Raises:
        Exception: Any error from the inference call is re-raised after the
            token failure has been reported to the proxy.
    """
    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="fal-ai",
        api_key=token,
    )

    try:
        # Perform automatic speech recognition
        output = client.automatic_speech_recognition(audio_file, model=model)
    except Exception as e:
        # One handler suffices: the original HfHubHTTPError and generic
        # branches were identical. Report the failure, then propagate.
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

    # Report success outside the try so a failure while reporting success
    # is not itself re-reported as a token error.
    report_token_status(token_id, "success", api_key=proxy_api_key)
    return output

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    # Transcribe the sample file through the managed-token helper above.
    result = fal_ai_speech_recognition("sample1.flac", proxy_api_key=proxy_api_key)
    print(result)

Image To Image

Basic Usage

import os
from huggingface_hub import InferenceClient

# Route requests through the fal-ai provider; authentication uses the
# HF_TOKEN environment variable.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ["HF_TOKEN"],
)

# Read the source image as raw bytes.
with open("cat.png", "rb") as image_file:
    input_image = image_file.read()

# output is a PIL.Image object
image = client.image_to_image(
    input_image,
    prompt="Turn the cat into a tiger.",
    model="black-forest-labs/FLUX.1-Kontext-dev",
)

cURL Example

curl -X POST "https://router.huggingface.co/fal-ai/fal-ai/flux-kontext/dev" \
  -H "Authorization: Bearer $HF_TOKEN" \
  -H "Content-Type: application/json" \
  -d "{
    \"prompt\": \"Turn the cat into a tiger.\",
    \"image_url\": \"data:image/png;base64,$(base64 -w 0 cat.png)\"
  }"

With HF-Inferoxy Token Management

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def fal_ai_image_to_image(image_path: str, prompt: str, model: str = "black-forest-labs/FLUX.1-Kontext-dev", proxy_api_key: str = None):
    """Transform an image according to a text prompt via the fal-ai provider.

    Obtains a token from the HF-Inferoxy proxy, runs the image-to-image
    call, and reports the token's health back to the proxy.

    Args:
        image_path: Path to the input image file.
        prompt: Instruction describing the transformation.
        model: Model id to use for the transformation.
        proxy_api_key: HF-Inferoxy API key used to obtain and report the token.

    Returns:
        PIL.Image.Image: The transformed image.

    Raises:
        OSError: If the input image cannot be read.
        Exception: Any error from the inference call is re-raised after the
            token failure has been reported to the proxy.
    """
    # Read the input image first: a local file error should not be
    # reported against the proxy token as an inference failure.
    with open(image_path, "rb") as image_file:
        input_image = image_file.read()

    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="fal-ai",
        api_key=token,
    )

    try:
        # Perform image to image transformation
        # output is a PIL.Image object
        image = client.image_to_image(
            input_image,
            prompt=prompt,
            model=model,
        )
    except Exception as e:
        # One handler suffices: the original HfHubHTTPError and generic
        # branches were identical. Report the failure, then propagate.
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

    # Report success outside the try so a failure while reporting success
    # is not itself re-reported as a token error.
    report_token_status(token_id, "success", api_key=proxy_api_key)
    return image

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    # Transform the local image and save the PIL result to disk.
    result_image = fal_ai_image_to_image("cat.png", "Turn the cat into a tiger.", proxy_api_key=proxy_api_key)
    result_image.save("tiger.png")

Text To Image

Basic Usage

import os
from huggingface_hub import InferenceClient

# Route requests through the fal-ai provider; authentication uses the
# HF_TOKEN environment variable.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ["HF_TOKEN"],
)

# Generate an image from a text prompt with Qwen-Image.
# output is a PIL.Image object
image = client.text_to_image(
    "Astronaut riding a horse",
    model="Qwen/Qwen-Image",
)

cURL Example

curl -s -X POST "https://router.huggingface.co/fal-ai/fal-ai/qwen-image" \
  -H "Authorization: Bearer $HF_TOKEN" \
  -H "Content-Type: application/json" \
  -d '{
    "sync_mode": true,
    "prompt": "Astronaut riding a horse"
  }' \
| jq -r '.images[0].url' \
| sed 's/^data:image\/png;base64,//' \
| base64 --decode > astronaut.png

With HF-Inferoxy Token Management

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def fal_ai_text_to_image(prompt: str, model: str = "Qwen/Qwen-Image", proxy_api_key: str = None):
    """Generate an image from a text prompt via the fal-ai provider.

    Obtains a token from the HF-Inferoxy proxy, runs the text-to-image
    call, and reports the token's health back to the proxy.

    Args:
        prompt: Text description of the image to generate.
        model: Model id to use for generation.
        proxy_api_key: HF-Inferoxy API key used to obtain and report the token.

    Returns:
        PIL.Image.Image: The generated image.

    Raises:
        Exception: Any error from the inference call is re-raised after the
            token failure has been reported to the proxy.
    """
    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="fal-ai",
        api_key=token,
    )

    try:
        # Generate image from text
        # output is a PIL.Image object
        image = client.text_to_image(
            prompt,
            model=model,
        )
    except Exception as e:
        # One handler suffices: the original HfHubHTTPError and generic
        # branches were identical. Report the failure, then propagate.
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

    # Report success outside the try so a failure while reporting success
    # is not itself re-reported as a token error.
    report_token_status(token_id, "success", api_key=proxy_api_key)
    return image

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    # Generate the image and save the PIL result to disk.
    result_image = fal_ai_text_to_image("Astronaut riding a horse", proxy_api_key=proxy_api_key)
    result_image.save("astronaut_horse.png")

Text To Video

Basic Usage

import os
from huggingface_hub import InferenceClient

# Route requests through the fal-ai provider; authentication uses the
# HF_TOKEN environment variable.
client = InferenceClient(
    provider="fal-ai",
    api_key=os.environ["HF_TOKEN"],
)

# Generate a video from a text prompt with Wan 2.2.
video = client.text_to_video(
    "A young man walking on the street",
    model="Wan-AI/Wan2.2-T2V-A14B",
)

With HF-Inferoxy Token Management

from huggingface_hub import InferenceClient
from huggingface_hub.errors import HfHubHTTPError
from hf_token_utils import get_proxy_token, report_token_status

def fal_ai_text_to_video(prompt: str, model: str = "Wan-AI/Wan2.2-T2V-A14B", proxy_api_key: str = None):
    """Generate a video from a text prompt via the fal-ai provider.

    Obtains a token from the HF-Inferoxy proxy, runs the text-to-video
    call, and reports the token's health back to the proxy.

    Args:
        prompt: Text description of the video to generate.
        model: Model id to use for generation.
        proxy_api_key: HF-Inferoxy API key used to obtain and report the token.

    Returns:
        The video output from ``InferenceClient.text_to_video``.

    Raises:
        Exception: Any error from the inference call is re-raised after the
            token failure has been reported to the proxy.
    """
    # Get token from proxy server (requires authentication)
    token, token_id = get_proxy_token(api_key=proxy_api_key)

    # Create client with managed token
    client = InferenceClient(
        provider="fal-ai",
        api_key=token,
    )

    try:
        # Generate video from text
        video = client.text_to_video(
            prompt,
            model=model,
        )
    except Exception as e:
        # One handler suffices: the original HfHubHTTPError and generic
        # branches were identical. Report the failure, then propagate.
        report_token_status(token_id, "error", str(e), api_key=proxy_api_key)
        raise

    # Report success outside the try so a failure while reporting success
    # is not itself re-reported as a token error.
    report_token_status(token_id, "success", api_key=proxy_api_key)
    return video

# Usage
if __name__ == "__main__":
    # You need to get your API key from the admin or create a user account
    # See RBAC_README.md for details on user management
    proxy_api_key = "your_proxy_api_key_here"  # Get this from admin
    result_video = fal_ai_text_to_video("A young man walking on the street", proxy_api_key=proxy_api_key)
    # Save or process the video as needed
    print(f"Generated video: {result_video}")

⚠️ Important: Authentication Required

All client operations now require authentication with the HF-Inferoxy server. This is part of the Role-Based Access Control (RBAC) system that provides secure access to the proxy services.

Getting Your API Key

  1. Default Admin User: The system creates a default admin user on first run. Check your server logs or the users.json file for the default admin credentials.

  2. Create a User Account: Use the admin account to create a regular user account:
    curl -X POST "http://localhost:8000/admin/users" \
      -H "Authorization: Bearer ADMIN_API_KEY" \
      -H "Content-Type: application/json" \
      -d '{"username": "youruser", "email": "user@example.com", "full_name": "Your Name", "role": "user"}'
    
  3. Use the Generated API Key: The response will include an API key that you’ll use in all client operations.

For detailed RBAC setup and user management, see RBAC_README.md.