Additional Tips
Enhance your implementation with Clarifai
This page provides extra guidance to help you get the most out of your development experience with Clarifai. Whether you're optimizing performance or just looking for best practices, these tips offer practical advice beyond the basics.
Webhook Integration
You can enable real-time automation by integrating Clarifai webhooks into your backend system. For example, you can set up a secure Flask endpoint to receive and validate webhook notifications for events like model training completion or new input uploads.
- Python
from flask import Flask, request, jsonify
import hmac
import hashlib
import os

app = Flask(__name__)

WEBHOOK_SECRET = os.environ.get("CLARIFAI_WEBHOOK_SECRET")

@app.route('/webhook', methods=['POST'])
def handle_webhook():
    """Handle Clarifai webhook notifications"""
    # Verify webhook signature
    signature = request.headers.get('X-Clarifai-Signature')
    if not verify_webhook_signature(request.data, signature):
        return jsonify({'error': 'Invalid signature'}), 401

    # Process webhook data
    webhook_data = request.get_json()
    if webhook_data.get('type') == 'model.training.completed':
        handle_training_completed(webhook_data)
    elif webhook_data.get('type') == 'input.uploaded':
        handle_input_uploaded(webhook_data)

    return jsonify({'status': 'success'})

def verify_webhook_signature(payload: bytes, signature: str) -> bool:
    """Verify webhook signature for security"""
    if not WEBHOOK_SECRET or not signature:
        return False
    expected_signature = hmac.new(
        WEBHOOK_SECRET.encode(),
        payload,
        hashlib.sha256
    ).hexdigest()
    return hmac.compare_digest(f"sha256={expected_signature}", signature)

def handle_training_completed(data: dict):
    """Handle model training completion"""
    model_id = data.get('model_id')
    print(f"✅ Model {model_id} training completed!")
    # Add your custom logic here
    # e.g., send notification, update database, trigger next workflow

def handle_input_uploaded(data: dict):
    """Handle new input upload"""
    input_id = data.get('input_id')
    print(f"📁 New input uploaded: {input_id}")
    # Add your custom logic here
    # e.g., trigger automatic processing, update UI

if __name__ == '__main__':
    app.run(debug=True, port=5000)
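Before pointing Clarifai at a public URL, you can exercise the endpoint end to end with a signed test request. The sketch below is illustrative: it assumes the Flask app above is running locally on port 5000, that the `requests` package is installed, and that the sample payload fields (`type`, `model_id`) match what your account's webhooks actually send.
- Python
import hashlib
import hmac
import json
import os

import requests

# Assumes CLARIFAI_WEBHOOK_SECRET is set to the same value as in the server process
secret = os.environ["CLARIFAI_WEBHOOK_SECRET"]

# Sample payload; field names mirror what handle_webhook() expects
payload = json.dumps({
    "type": "model.training.completed",
    "model_id": "my-model",  # hypothetical model ID for illustration
}).encode()

# Sign the raw request body exactly as verify_webhook_signature() recomputes it
signature = "sha256=" + hmac.new(secret.encode(), payload, hashlib.sha256).hexdigest()

response = requests.post(
    "http://localhost:5000/webhook",
    data=payload,
    headers={
        "Content-Type": "application/json",
        "X-Clarifai-Signature": signature,
    },
)
print(response.status_code, response.json())
A 200 response confirms both the signature check and the event routing; altering the payload after signing should return a 401.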
Caching Predictions
You can avoid repeated computation for identical inputs by caching model predictions locally. For example, the decorator below stores results on disk and returns them on subsequent calls with the same arguments, saving both latency and redundant API calls for resource-intensive prediction tasks.
- Python
import functools
import hashlib
import os
import pickle
from typing import Callable

from clarifai.client.model import Model

def cache_predictions(cache_dir: str = ".cache"):
    """Decorator to cache model predictions"""
    os.makedirs(cache_dir, exist_ok=True)

    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Create cache key from function arguments
            cache_key = hashlib.md5(
                str(args).encode() + str(kwargs).encode()
            ).hexdigest()
            cache_file = os.path.join(cache_dir, f"{func.__name__}_{cache_key}.pkl")

            # Try to load from cache
            if os.path.exists(cache_file):
                try:
                    with open(cache_file, 'rb') as f:
                        return pickle.load(f)
                except Exception:
                    pass  # Cache corrupted, will regenerate

            # Generate new result
            result = func(*args, **kwargs)

            # Save to cache
            try:
                with open(cache_file, 'wb') as f:
                    pickle.dump(result, f)
            except Exception:
                pass  # Failed to cache, continue anyway

            return result
        return wrapper
    return decorator

# Usage example
@cache_predictions()
def expensive_prediction(model_url: str, input_text: str):
    model = Model(url=model_url, pat=os.environ.get("CLARIFAI_PAT"))
    return model.predict_by_bytes(input_text.encode(), input_type="text")
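With the decorator applied, the first call computes the prediction and writes it to disk, and any later call with identical arguments is served from the cache. A minimal sketch of how you might exercise it (the model URL is a placeholder; substitute a model from your own Clarifai account, and make sure `CLARIFAI_PAT` is set):
- Python
# Hypothetical model URL for illustration; substitute one from your account
MODEL_URL = "https://clarifai.com/clarifai/main/models/some-text-model"

# First call hits the Clarifai API and writes a .pkl file under .cache/
result = expensive_prediction(MODEL_URL, "Is this review positive or negative?")

# An identical second call loads the pickled result from disk instead
cached = expensive_prediction(MODEL_URL, "Is this review positive or negative?")

# To invalidate, delete the cache directory:
# import shutil; shutil.rmtree(".cache", ignore_errors=True)
Two properties of this design are worth noting: the cache key is an MD5 hash of the stringified arguments, so even a whitespace difference in the input produces a cache miss, and the decorator fails open, meaning that if a result cannot be read from or written to disk, the prediction still succeeds but simply isn't cached.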