# Use .env.local to change these variables
# DO NOT EDIT THIS FILE WITH SENSITIVE DATA
### Models ###
# Models are sourced exclusively from an OpenAI-compatible base URL.
# Example: https://router.huggingface.co/v1
# http://localhost:11434/v1 for Ollama model API
OPENAI_BASE_URL=https://router.huggingface.co/v1
# Canonical auth token for any OpenAI-compatible provider
OPENAI_API_KEY=#your provider API key (works for HF router, OpenAI, LM Studio, etc.)
# Legacy alias (still supported): if set and OPENAI_API_KEY is empty, it will be used
# HF_TOKEN=
# Ollama #
# Comma-separated list of Ollama models to download; leave empty if not using Ollama.
# OLLAMA_MODELS=
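# Example (illustrative tags; use whichever models are available in your Ollama library):
# OLLAMA_MODELS=llama3.1,qwen2.5:7b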
### MongoDB ###
MONGODB_URL=#your MongoDB URL here; use the chat-ui-db image if you don't want to set this
MONGODB_DB_NAME=chat-ui
MONGODB_DIRECT_CONNECTION=false
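# Example for a local instance (adjust host/port to your deployment):
# MONGODB_URL=mongodb://localhost:27017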
## Public app configuration ##
PUBLIC_APP_GUEST_MESSAGE=# a message to the guest user. If not set, no message will be shown. Only used if you have authentication enabled.
PUBLIC_APP_NAME=ChatUI # name used as title throughout the app
PUBLIC_APP_ASSETS=chatui # used to find logos & favicons in static/$PUBLIC_APP_ASSETS
PUBLIC_APP_DESCRIPTION=# description used throughout the app
PUBLIC_APP_DATA_SHARING=# Set to 1 to enable an option in the user settings to share conversations with model authors
### Local Storage ###
MONGO_STORAGE_PATH= # where the local MongoDB data folder is stored
REASONING_SUMMARY=false # set to true to enable reasoning summaries
## Model overrides
MODELS=
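# Illustrative override; the model id and keys below are placeholders, see the chat-ui README for the exact schema:
# MODELS=`[{"name": "meta-llama/Llama-3.1-8B-Instruct", "displayName": "Llama 3.1 8B"}]`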
## Task model
# Optional: set to the model id/name from the `${OPENAI_BASE_URL}/models` list
# to use for internal tasks (title summarization, etc.). If not set, the current model will be used
TASK_MODEL=
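# Example (any model id returned by ${OPENAI_BASE_URL}/models; this one is a placeholder):
# TASK_MODEL=meta-llama/Llama-3.1-8B-Instruct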
# Arch router (OpenAI-compatible) endpoint base URL used for route selection
# Example: https://api.openai.com/v1 or your hosted Arch endpoint
LLM_ROUTER_ARCH_BASE_URL=
## LLM Router Configuration
# Path to routes policy (JSON array). Defaults to llm-router/routes.chat.json
LLM_ROUTER_ROUTES_PATH=
# Model used at the Arch router endpoint for selection
LLM_ROUTER_ARCH_MODEL=
# Fallback behavior
# Route to map "other" to (must exist in routes file)
LLM_ROUTER_OTHER_ROUTE=casual_conversation
# Model to call if the Arch selection fails entirely
LLM_ROUTER_FALLBACK_MODEL=
# Arch selection timeout in milliseconds (default 10000)
LLM_ROUTER_ARCH_TIMEOUT_MS=10000
# Enable router multimodal fallback (set to true to allow image inputs via router)
LLM_ROUTER_ENABLE_MULTIMODAL=false
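# Minimal illustrative router setup (all values below are placeholders; the routes file must define the named routes):
# LLM_ROUTER_ARCH_BASE_URL=https://api.openai.com/v1
# LLM_ROUTER_ARCH_MODEL=gpt-4o-mini
# LLM_ROUTER_ROUTES_PATH=llm-router/routes.chat.json
# LLM_ROUTER_FALLBACK_MODEL=meta-llama/Llama-3.1-8B-Instruct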
# Router UI overrides (client-visible)
# Public display name for the router entry in the model list. Defaults to "Omni".
PUBLIC_LLM_ROUTER_DISPLAY_NAME=Omni
# Optional: public logo URL for the router entry. If unset, the UI shows a Carbon icon.
PUBLIC_LLM_ROUTER_LOGO_URL=
# Public alias id used for the virtual router model (Omni). Defaults to "omni".
PUBLIC_LLM_ROUTER_ALIAS_ID=omni
### Authentication ###
# Parameters to enable open id login
OPENID_CONFIG=
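# Illustrative shape only; keys mirror the legacy OPENID_* variables at the bottom of this file, values are placeholders:
# OPENID_CONFIG=`{"PROVIDER_URL": "https://accounts.google.com", "CLIENT_ID": "<client id>", "CLIENT_SECRET": "<client secret>", "SCOPES": "openid profile email"}`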
MESSAGES_BEFORE_LOGIN=# how many messages a user can send in a conversation before having to log in. Set to 0 to force login right away
# If defined, only these emails will be allowed to log in
ALLOWED_USER_EMAILS=`[]`
# If defined, users with emails matching these domains will also be allowed to log in
ALLOWED_USER_DOMAINS=`[]`
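# Examples (placeholder addresses and domains):
# ALLOWED_USER_EMAILS=`["alice@example.com", "bob@example.com"]`
# ALLOWED_USER_DOMAINS=`["example.com"]`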
# valid alternative redirect URLs for OAuth, used for HuggingChat apps
ALTERNATIVE_REDIRECT_URLS=`[]`
### Cookies
# name of the cookie used to store the session
COOKIE_NAME=hf-chat
# If the value of this cookie changes, the session is destroyed. Useful if chat-ui is deployed on a subpath
# of your domain, and you want chat-ui sessions to reset if the user's auth changes
COUPLE_SESSION_WITH_COOKIE_NAME=
# specify secure behaviour for cookies
COOKIE_SAMESITE=# can be "lax", "strict", "none" or left empty
COOKIE_SECURE=# set to true to only allow cookies over https
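# Typical hardened setup behind HTTPS (illustrative):
# COOKIE_SAMESITE=lax
# COOKIE_SECURE=true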
TRUSTED_EMAIL_HEADER=# header to use to get the user email; only use this if you know what you are doing
### Admin stuff ###
ADMIN_CLI_LOGIN=true # set to false to disable the CLI login
ADMIN_TOKEN=#We recommend leaving this empty; you can get the token from the terminal.
PUBLIC_SMOOTH_UPDATES=false # set to true to enable smoothing of messages client-side, can be CPU intensive
PUBLIC_ORIGIN=#https://huggingface.co
PUBLIC_SHARE_PREFIX=#https://hf.co/chat
# mostly huggingchat specific
PUBLIC_GOOGLE_ANALYTICS_ID=#G-XXXXXXXX / Leave empty to disable
PUBLIC_PLAUSIBLE_SCRIPT_URL=#/js/script.js / Leave empty to disable
PUBLIC_APPLE_APP_ID=#1234567890 / Leave empty to disable
### Feature Flags ###
LLM_SUMMARIZATION=true # generate conversation titles with LLMs
ALLOW_IFRAME=true # Allow the app to be embedded in an iframe
ENABLE_DATA_EXPORT=true # set to false to disable data export
### Rate limits ###
# See `src/lib/server/usageLimits.ts`
# {
# conversations: number, # how many conversations
# messages: number, # how many messages in a conversation
# assistants: number, # how many assistants
# messageLength: number, # how long can a message be before we cut it off
# messagesPerMinute: number, # how many messages per minute
# tools: number # how many tools
# }
USAGE_LIMITS=`{}`
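# Illustrative limits following the schema above (numbers are arbitrary):
# USAGE_LIMITS=`{"conversations": 100, "messages": 50, "assistants": 5, "messageLength": 4000, "messagesPerMinute": 5, "tools": 10}`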
### HuggingFace specific ###
## Feature flag & admin settings
# Used for setting early access & admin flags to users
HF_ORG_ADMIN=
HF_ORG_EARLY_ACCESS=
WEBHOOK_URL_REPORT_ASSISTANT=#provide a Slack webhook URL to get notified of reports/feature requests
### Metrics ###
LOG_LEVEL=info
### Parquet export ###
# No longer in use, but useful for exporting conversations to a Parquet file as a Hugging Face dataset
PARQUET_EXPORT_DATASET=
PARQUET_EXPORT_HF_TOKEN=
ADMIN_API_SECRET=# secret for admin API calls, like computing usage stats or exporting parquet data
### Config ###
ENABLE_CONFIG_MANAGER=true
### Docker build variables ###
# These values cannot be updated at runtime
# They need to be passed when building the docker image
# See https://github.com/huggingface/chat-ui/blob/main/.github/workflows/deploy-prod.yml#L44-L47
APP_BASE="" # base path of the app, e.g. /chat, left blank as default
### Body size limit for SvelteKit https://svelte.dev/docs/kit/adapter-node#Environment-variables-BODY_SIZE_LIMIT
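# For reference, 15728640 = 15 * 1024 * 1024, i.e. a 15 MiB request body limit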
BODY_SIZE_LIMIT=15728640
PUBLIC_COMMIT_SHA=
### LEGACY parameters
ALLOW_INSECURE_COOKIES=false # LEGACY! Use COOKIE_SECURE and COOKIE_SAMESITE instead
PARQUET_EXPORT_SECRET=#DEPRECATED, use ADMIN_API_SECRET instead
RATE_LIMIT= # /!\ DEPRECATED definition of messages per minute. Use USAGE_LIMITS.messagesPerMinute instead
OPENID_CLIENT_ID=
OPENID_CLIENT_SECRET=
OPENID_SCOPES="openid profile" # Add "email" for some providers like Google that do not provide preferred_username
OPENID_NAME_CLAIM="name" # Change to "username" for some providers that do not provide name
OPENID_PROVIDER_URL=https://huggingface.co # for Google, use https://accounts.google.com
OPENID_TOLERANCE=
OPENID_RESOURCE=
EXPOSE_API=# deprecated, API is now always exposed