-
Notifications
You must be signed in to change notification settings - Fork 1
/
.env.local.example
81 lines (67 loc) · 3.7 KB
/
.env.local.example
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# File: .env.local.example
# Amazon Bedrock Titan Configuration
AMAZON_BEDROCK_TITAN_TEXT_MODEL=amazon.titan-text-express-v1
AMAZON_BEDROCK_TITAN_EMBEDDING_MODEL=amazon.titan-embed-text-v2:0
AMAZON_BEDROCK_TITAN_ENDPOINT=***YOUR-AMAZON-BEDROCK-TITAN-ENDPOINT***
# Azure OpenAI O1 Configuration
AZURE_OPENAI_O1_TEXT_MODEL=o1-mini
AZURE_OPENAI_O1_EMBEDDING_MODEL=text-embedding-3-small
AZURE_OPENAI_O1_ENDPOINT=***YOUR-AZURE-OPENAI-O1-ENDPOINT***
AZURE_OPENAI_O1_API_KEY=***YOUR-AZURE-OPENAI-O1-API-KEY***
# Cloudflare Gemma Configuration
CLOUDFLARE_GEMMA_TEXT_MODEL=gemma-2b-it-lora
CLOUDFLARE_GEMMA_ACCOUNT_ID=***YOUR-CLOUDFLARE-GEMMA-ACCOUNT-ID***
CLOUDFLARE_GEMMA_ENDPOINT=https://api.cloudflare.com/client/v4/accounts/***YOUR-CLOUDFLARE-GEMMA-ACCOUNT-ID***/ai/run/@cf/google/gemma-2b-it-lora
CLOUDFLARE_GEMMA_BEARER_TOKEN=***YOUR-CLOUDFLARE-GEMMA-X-BEARER-TOKEN***
CLOUDFLARE_GEMMA_X_AUTH_EMAIL=***YOUR-CLOUDFLARE-GEMMA-X-AUTH-EMAIL***
CLOUDFLARE_GEMMA_X_AUTH_KEY=***YOUR-CLOUDFLARE-GEMMA-X-AUTH-KEY***
# https://github.com/orgs/vercel/discussions/3621#discussioncomment-6740777
# Vercel does not have support in the Environment Variables UI
# to reference other variables. Referencing other variables
# is only supported in .env* files.
#
# CLOUDFLARE_GEMMA_ENDPOINT=https://api.cloudflare.com/client/v4/accounts/${CLOUDFLARE_GEMMA_ACCOUNT_ID}/ai/run/@cf/google/${CLOUDFLARE_GEMMA_TEXT_MODEL}
# works in .env.local but not in Vercel's Environment Variables UI.
#
# Therefore, we need to use
# CLOUDFLARE_GEMMA_ENDPOINT=https://api.cloudflare.com/client/v4/accounts/***YOUR-CLOUDFLARE-GEMMA-ACCOUNT-ID***/ai/run/@cf/google/gemma-2b-it-lora
# Cloudflare Llama Configuration
CLOUDFLARE_LLAMA_TEXT_MODEL=llama-3.2-1b-instruct
CLOUDFLARE_LLAMA_ACCOUNT_ID=***YOUR-CLOUDFLARE-LLAMA-ACCOUNT-ID***
CLOUDFLARE_LLAMA_ENDPOINT=https://api.cloudflare.com/client/v4/accounts/***YOUR-CLOUDFLARE-LLAMA-ACCOUNT-ID***/ai/run/@cf/meta/llama-3.2-1b-instruct
CLOUDFLARE_LLAMA_BEARER_TOKEN=***YOUR-CLOUDFLARE-LLAMA-X-BEARER-TOKEN***
CLOUDFLARE_LLAMA_X_AUTH_EMAIL=***YOUR-CLOUDFLARE-LLAMA-X-AUTH-EMAIL***
CLOUDFLARE_LLAMA_X_AUTH_KEY=***YOUR-CLOUDFLARE-LLAMA-X-AUTH-KEY***
# Google Vertex Gemini Configuration
GOOGLE_VERTEX_GEMINI_TEXT_MODEL=gemini-1.5-flash-8b
GOOGLE_VERTEX_GEMINI_EMBEDDING_MODEL=text-embedding-004
GOOGLE_VERTEX_GEMINI_LOCATION=***YOUR-GOOGLE-VERTEX-GEMINI-LOCATION***
GOOGLE_APPLICATION_CREDENTIALS=***YOUR-GOOGLE-APPLICATION-CREDENTIALS***
GOOGLE_CLOUD_PROJECT=***YOUR-GOOGLE-CLOUD-PROJECT-ID***
# Google Vertex Gemma Configuration
GOOGLE_VERTEX_GEMMA_TEXT_MODEL=gemma2:2b
GOOGLE_VERTEX_GEMMA_EMBEDDING_MODEL=text-embedding-004
GOOGLE_VERTEX_GEMMA_LOCATION=***YOUR-GOOGLE-VERTEX-GEMMA-LOCATION***
GOOGLE_VERTEX_GEMMA_ENDPOINT=***YOUR-GOOGLE-VERTEX-GEMMA-ENDPOINT***
# Google Vertex Llama Configuration
GOOGLE_VERTEX_LLAMA_TEXT_MODEL=llama3.2:1b
GOOGLE_VERTEX_LLAMA_EMBEDDING_MODEL=text-embedding-004
GOOGLE_VERTEX_LLAMA_LOCATION=***YOUR-GOOGLE-VERTEX-LLAMA-LOCATION***
GOOGLE_VERTEX_LLAMA_ENDPOINT=***YOUR-GOOGLE-VERTEX-LLAMA-ENDPOINT***
# Ollama Gemma Configuration
OLLAMA_GEMMA_TEXT_MODEL=gemma2:2b
OLLAMA_GEMMA_ENDPOINT=http://localhost:11434/api/generate
# Ollama Llama Configuration
OLLAMA_LLAMA_TEXT_MODEL=llama3.2:1b
OLLAMA_LLAMA_ENDPOINT=http://localhost:11434/api/generate
# OpenAI O1 Configuration
OPENAI_O1_TEXT_MODEL=o1-mini
OPENAI_O1_EMBEDDING_MODEL=text-embedding-3-small
OPENAI_O1_ENDPOINT=***YOUR-OPENAI-O1-ENDPOINT***
OPENAI_O1_API_KEY=***YOUR-OPENAI-O1-API-KEY***
# Streaming Responses
STREAM=true
# Temperature for Model Generation
TEMPERATURE=0.0
# Set Winston Log Level
# Change this to error, warn, info, http, verbose, debug, silly, etc. based on https://github.com/winstonjs/winston#logging-levels
WINSTON_LOGGER_LEVEL=silly