base_url: https://api.openai.com/v1
# api_key: ${OPENAI_API_KEY} # Optional fallback if client doesn't send auth header
-# Local provider - only for route mode
-# PII requests are sent here instead of the configured provider
+# Local provider - only used when mode: route
# Supports: ollama (native), openai (for vLLM, LocalAI, LM Studio, etc.)
-#
-# Uncomment for route mode:
-# local:
-# type: ollama # or "openai" for OpenAI-compatible servers
-# base_url: http://localhost:11434
-# model: llama3.2
-# # api_key: ${LOCAL_API_KEY} # Only needed for OpenAI-compatible servers
+local:
+  type: ollama
+  base_url: http://localhost:11434
+  model: llama3.2
+  # api_key: ${LOCAL_API_KEY} # Only needed for OpenAI-compatible servers
# Masking settings (only for mask mode)
masking:
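If the local provider is an OpenAI-compatible server (vLLM, LocalAI, LM Studio) rather than Ollama, the same block would look roughly like the sketch below; the port, model name, and env var are illustrative rather than taken from the config above.

local:
  type: openai                        # OpenAI-compatible servers (vLLM, LocalAI, LM Studio, ...)
  base_url: http://localhost:8000/v1  # illustrative; point at whatever the local server exposes
  model: my-local-model               # illustrative model id
  api_key: ${LOCAL_API_KEY}           # only if the local server enforces auth
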
}
/**
- * Wildcard proxy - forwards all other /v1/* requests to the configured provider
- * Supports: /models, /embeddings, /audio/*, /images/*, /files/*, etc.
- * Must be defined AFTER specific routes to avoid matching them first
+ * Wildcard proxy for /models, /embeddings, /audio/*, /images/*, etc.
*/
proxyRoutes.all("/*", (c) => {
const { openai } = getRouter().getProvidersInfo();
const path = c.req.path.replace(/^\/openai\/v1/, "");
return proxy(`${openai.baseUrl}${path}`, {
+ ...c.req,
headers: {
+ "Content-Type": c.req.header("Content-Type"),
Authorization: c.req.header("Authorization"),
},
});
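
To sanity-check the new catch-all, a request with a body can be pushed through it; spreading c.req into the proxy options lets Hono's proxy helper forward the original method and body, so POST routes such as /embeddings work too. In the sketch below the host, port, and the /openai/v1 mount prefix are assumptions inferred from the path rewrite, not values taken from this change.

// Illustrative only: POST a JSON body through the wildcard proxy.
// Host, port, and mount prefix are assumed for the example.
const res = await fetch("http://localhost:3000/openai/v1/embeddings", {
  method: "POST",
  headers: {
    "Content-Type": "application/json",
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
  },
  body: JSON.stringify({ model: "text-embedding-3-small", input: "hello" }),
});
console.log(res.status); // 200 when the upstream provider accepts the key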