Schema (one row per model; costs are USD per million tokens):

| Column | Type | Notes |
|---|---|---|
| `name` | string | length 8–58 |
| `hf_id` | string | length 12–49 where present; null for every row in this slice |
| `author` | string | length 3–21 |
| `providers` | list | 1–16 entries; each entry gives provider name, context window, max output tokens, input/output price, latency (s), and throughput (tokens/s) |
| `median_input_cost` | float64 | observed range 0–75 |
| `median_output_cost` | float64 | observed range 0–150 |
| `low_input_cost` | float64 | observed range 0–75 |
| `low_output_cost` | float64 | observed range 0–150 |
| `high_input_cost` | float64 | observed range 0–75 |
| `high_output_cost` | float64 | observed range 0–150 |
| `is_open_weights` | bool | 2 classes in the full dataset; false for every row in this slice |
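The six aggregate cost columns are derivable from the `providers` list: low is the minimum price across providers, high is the maximum, and the median appears to resolve to the upper middle value on even-length lists. A minimal sketch of that derivation, assuming `statistics.median_high` as the aggregation rule — it reproduces every stored aggregate in this slice (e.g. Mistral: Saba, inputs [0.2, 0.79] → median 0.79), but the dataset's actual pipeline code is not shown here:

```python
# Sketch of one row and the inferred aggregate-cost convention.
# Field names follow the schema above; median_high is an assumption
# inferred from the rows below, not confirmed by the dataset itself.
import json
from statistics import median_high

row = {
    "name": "Mistral: Saba",
    "hf_id": None,
    "author": "mistralai",
    "providers": json.dumps([
        {"name": "Mistral", "context": 32000, "max_output": 32000,
         "input": 0.2, "output": 0.6, "latency": 0.33, "throughput": 85.73},
        {"name": "Groq", "context": 33000, "max_output": 33000,
         "input": 0.79, "output": 0.79, "latency": 0.31, "throughput": 319.9},
    ]),
}

providers = json.loads(row["providers"])
for side in ("input", "output"):
    prices = [p[side] for p in providers]
    print(side,
          "low:", min(prices),          # -> low_*_cost
          "median:", median_high(prices),  # -> median_*_cost (matches stored values)
          "high:", max(prices))         # -> high_*_cost
# input  low: 0.2 median: 0.79 high: 0.79
# output low: 0.6 median: 0.79 high: 0.79
```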
Data (100 rows; `hf_id` is null throughout and omitted below; a single value in a cost column means low = median = high; provider entries read: name (context, max output, $input/$output per 1M tokens, latency in s, throughput in tokens/s)):

| Model | Author | Providers | Input $/M (low / median / high) | Output $/M (low / median / high) | Open weights |
|---|---|---|---|---|---|
| Perplexity: Sonar Reasoning Pro | perplexity-ai | Perplexity (128K, 128K, $2/$8, 15.84s, 45.76) | 2 | 8 | false |
| Perplexity: Sonar Pro | perplexity-ai | Perplexity (200K, 8K, $3/$15, 2.34s, 57.7) | 3 | 15 | false |
| Perplexity: Sonar Deep Research | perplexity-ai | Perplexity (200K, 200K, $2/$8, 19.42s, 43.61) | 2 | 8 | false |
| OpenAI: GPT-4.5 (Preview) | OpenAI | OpenAI (128K, 16K, $75/$150, 1.73s, 14.8) | 75 | 150 | false |
| Google: Gemini 2.0 Flash Lite | Google | Google Vertex (1.05M, 8K, $0.075/$0.3, 0.7s, 170.9); Google AI Studio (1.05M, 8K, $0.075/$0.3, 0.6s, 187.9) | 0.075 | 0.3 | false |
| Anthropic: Claude 3.7 Sonnet (self-moderated) | Anthropic | Anthropic (200K, 128K, $3/$15, 1.42s, 55.27) | 3 | 15 | false |
| Anthropic: Claude 3.7 Sonnet | Anthropic | Anthropic (200K, 128K, $3/$15, 1.42s, 58.03); Amazon Bedrock (200K, 128K, $3/$15, 1.24s, 37.11); Google Vertex (200K, 64K, $3/$15, 1.9s, 54.04); Google Vertex (Europe) (200K, 64K, $3/$15, 2.24s, 60.08) | 3 | 15 | false |
| Anthropic: Claude 3.7 Sonnet (thinking) | Anthropic | Anthropic (200K, 128K, $3/$15, 1.42s, 57.92); Amazon Bedrock (200K, 128K, $3/$15, 1.24s, 37.03); Google Vertex (200K, 64K, $3/$15, 1.9s, 54.06); Google Vertex (Europe) (200K, 64K, $3/$15, 2.24s, 60.08) | 3 | 15 | false |
| Mistral: Saba | mistralai | Mistral (32K, 32K, $0.2/$0.6, 0.33s, 85.73); Groq (33K, 33K, $0.79/$0.79, 0.31s, 319.9) | 0.2 / 0.79 / 0.79 | 0.6 / 0.79 / 0.79 | false |
| OpenAI: o3 Mini High | OpenAI | OpenAI (200K, 100K, $1.1/$4.4, 9.33s, 118.9) | 1.1 | 4.4 | false |
| Google: Gemini Flash 2.0 | Google | Google AI Studio (1M, 8K, $0.1/$0.4, 0.5s, 165.8); Google Vertex (1M, 8K, $0.15/$0.6, 0.52s, 166.5) | 0.1 / 0.15 / 0.15 | 0.4 / 0.6 / 0.6 | false |
| Qwen: Qwen VL Plus | Qwen | Alibaba (8K, 2K, $0.21/$0.63, 0.34s, 100.2) | 0.21 | 0.63 | false |
| AionLabs: Aion-1.0 | AionLabs | AionLabs (33K, 33K, $4/$8, 1.31s, 75.7) | 4 | 8 | false |
| AionLabs: Aion-RP 1.0 (8B) | AionLabs | AionLabs (33K, 33K, $0.2/$0.2, 0.9s, 41.82) | 0.2 | 0.2 | false |
| Qwen: Qwen VL Max | Qwen | Alibaba (8K, 2K, $0.8/$3.2, 1.2s, 32.68) | 0.8 | 3.2 | false |
| Qwen: Qwen-Turbo | Qwen | Alibaba (1M, 8K, $0.05/$0.2, 0.71s, 108.6) | 0.05 | 0.2 | false |
| Qwen: Qwen-Plus | Qwen | Alibaba (131K, 8K, $0.4/$1.2, 1.26s, 34.82) | 0.4 | 1.2 | false |
| Qwen: Qwen-Max | Qwen | Alibaba (33K, 8K, $1.6/$6.4, 1.41s, 36.22) | 1.6 | 6.4 | false |
| OpenAI: o3 Mini | OpenAI | OpenAI (200K, 100K, $1.1/$4.4, 6s, 109.7) | 1.1 | 4.4 | false |
| Perplexity: Sonar Reasoning | perplexity-ai | Perplexity (127K, 127K, $1/$5, 2.18s, 92.25) | 1 | 5 | false |
| Perplexity: Sonar | perplexity-ai | Perplexity (127K, 127K, $1/$1, 1.85s, 43.94) | 1 | 1 | false |
| Liquid: LFM 7B | Liquid | Liquid (33K, 33K, $0.01/$0.01, 0.64s, 52.25) | 0.01 | 0.01 | false |
| Liquid: LFM 3B | Liquid | Liquid (33K, 33K, $0.02/$0.02, 0.35s, 39.38) | 0.02 | 0.02 | false |
| Mistral: Codestral 2501 | mistralai | Mistral (256K, 256K, $0.3/$0.9, 0.26s, 178.9) | 0.3 | 0.9 | false |
| OpenAI: o1 | OpenAI | OpenAI (200K, 100K, $15/$60, 8.88s, 38.1) | 15 | 60 | false |
| xAI: Grok 2 Vision 1212 | xAI | xAI (33K, 33K, $2/$10, 0.39s, 49.45) | 2 | 10 | false |
| xAI: Grok 2 1212 | xAI | xAI (131K, 131K, $2/$10, 0.22s, 52.06) | 2 | 10 | false |
| Cohere: Command R7B (12-2024) | Cohere | Cohere (128K, 4K, $0.0375/$0.15, 0.27s, 168.6) | 0.0375 | 0.15 | false |
| Amazon: Nova Lite 1.0 | Amazon | Amazon Bedrock (300K, 5K, $0.06/$0.24, 0.16s, 76.55) | 0.06 | 0.24 | false |
| Amazon: Nova Micro 1.0 | Amazon | Amazon Bedrock (128K, 5K, $0.035/$0.14, 0.21s, 209.2) | 0.035 | 0.14 | false |
| Amazon: Nova Pro 1.0 | Amazon | Amazon Bedrock (300K, 5K, $0.8/$3.2, 0.22s, 69.02) | 0.8 | 3.2 | false |
| OpenAI: GPT-4o (2024-11-20) | OpenAI | OpenAI (128K, 16K, $2.5/$10, 0.4s, 77.96) | 2.5 | 10 | false |
| Mistral Large 2411 | Mistral Large 2411 | Mistral (128K, 128K, $2/$6, 0.44s, 51.91) | 2 | 6 | false |
| Mistral Large 2407 | Mistral Large 2407 | Mistral (128K, 128K, $2/$6, 0.71s, 44.51) | 2 | 6 | false |
| Mistral: Pixtral Large 2411 | mistralai | Mistral (128K, 128K, $2/$6, 1.05s, 41.35) | 2 | 6 | false |
| xAI: Grok Vision Beta | xAI | xAI (8K, 8K, $5/$15, 0.51s, 60.58) | 5 | 15 | false |
| Anthropic: Claude 3.5 Haiku (2024-10-22) (self-moderated) | Anthropic | Anthropic (200K, 8K, $0.8/$4, 1.64s, 59.35) | 0.8 | 4 | false |
| Anthropic: Claude 3.5 Haiku (2024-10-22) | Anthropic | Anthropic (200K, 8K, $0.8/$4, 1.64s, 59.34); Google Vertex (200K, 8K, $0.8/$4, 1.5s, 64.78) | 0.8 | 4 | false |
| Anthropic: Claude 3.5 Haiku (self-moderated) | Anthropic | Anthropic (200K, 8K, $0.8/$4, 1.65s, 55.92) | 0.8 | 4 | false |
| Anthropic: Claude 3.5 Haiku | Anthropic | Amazon Bedrock (200K, 8K, $0.8/$4, 1.14s, 51.68); Anthropic (200K, 8K, $0.8/$4, 1.66s, 55.83); Amazon Bedrock (200K, 8K, $0.8/$4, 1.07s, 52.51); Google Vertex (200K, 8K, $0.8/$4, 1.53s, 53.87) | 0.8 | 4 | false |
| Anthropic: Claude 3.5 Sonnet (self-moderated) | Anthropic | Anthropic (200K, 8K, $3/$15, 1.72s, 53.86) | 3 | 15 | false |
| Anthropic: Claude 3.5 Sonnet | Anthropic | Anthropic (200K, 8K, $3/$15, 1.71s, 53.99); Google Vertex (200K, 8K, $3/$15, 1.59s, 57.82); Amazon Bedrock (200K, 8K, $3/$15, 1.55s, 40.21); Amazon Bedrock (200K, 8K, $3/$15, 1.49s, 40.24); Google Vertex (200K, 8K, $3/$15, 1.22s, 58.16) | 3 | 15 | false |
| xAI: Grok Beta | xAI | xAI (131K, 131K, $5/$15, 0.26s, 59.3) | 5 | 15 | false |
| Mistral: Ministral 8B | mistralai | Mistral (128K, 128K, $0.1/$0.1, 0.3s, 128.2) | 0.1 | 0.1 | false |
| Mistral: Ministral 3B | mistralai | Mistral (128K, 128K, $0.04/$0.04, 0.19s, 200.4) | 0.04 | 0.04 | false |
| Inflection: Inflection 3 Pi | Inflection | Inflection (8K, 1K, $2.5/$10, 1.22s, 24.06) | 2.5 | 10 | false |
| Inflection: Inflection 3 Productivity | Inflection | Inflection (8K, 8K, $2.5/$10, 2.57s, 25.93) | 2.5 | 10 | false |
| Google: Gemini Flash 1.5 8B | Google | Google AI Studio (1M, 8K, $0.0375/$0.15, 0.45s, 381.5) | 0.0375 | 0.15 | false |
| Liquid: LFM 40B MoE | Liquid | Liquid (33K, 33K, $0.15/$0.15, 0.55s, 171.5); Lambda (66K, 66K, $0.15/$0.15, 0.46s, 233.9) | 0.15 | 0.15 | false |
| OpenAI: o1-mini (2024-09-12) | OpenAI | OpenAI (128K, 66K, $1.1/$4.4, 0.77s, 149.4) | 1.1 | 4.4 | false |
| OpenAI: o1-preview | OpenAI | OpenAI (128K, 33K, $15/$60, 1.57s, 96.46) | 15 | 60 | false |
| OpenAI: o1-preview (2024-09-12) | OpenAI | OpenAI (128K, 33K, $15/$60, 1.07s, 114.3) | 15 | 60 | false |
| OpenAI: o1-mini | OpenAI | OpenAI (128K, 66K, $1.1/$4.4, 0.76s, 124.9) | 1.1 | 4.4 | false |
| Cohere: Command R (08-2024) | Cohere | Cohere (128K, 4K, $0.1425/$0.57, 0.46s, 51.72) | 0.1425 | 0.57 | false |
| Cohere: Command R+ (08-2024) | Cohere | Cohere (128K, 4K, $2.375/$9.5, 0.57s, 42.71) | 2.375 | 9.5 | false |
| Google: Gemini Flash 1.5 8B Experimental | Google | Google AI Studio (1M, 8K, $0/$0, 0.52s, 225.1) | 0 | 0 | false |
| AI21: Jamba 1.5 Large | AI21 | AI21 (256K, 4K, $2/$8, 0.22s, 50.92) | 2 | 8 | false |
| AI21: Jamba 1.5 Mini | AI21 | AI21 (256K, 4K, $0.2/$0.4, 0.47s, 184.4) | 0.2 | 0.4 | false |
| OpenAI: ChatGPT-4o | OpenAI | OpenAI (128K, 16K, $5/$15, 0.36s, 93.07) | 5 | 15 | false |
| OpenAI: GPT-4o (2024-08-06) | OpenAI | OpenAI (128K, 16K, $2.5/$10, 0.49s, 46.03); Azure (128K, 16K, $2.5/$10, 0.72s, 76.92) | 2.5 | 10 | false |
| Perplexity: Llama 3.1 Sonar 8B | perplexity-ai | Perplexity (131K, 131K, $0.2/$0.2, 0.49s, 198.3) | 0.2 | 0.2 | false |
| Perplexity: Llama 3.1 Sonar 70B | perplexity-ai | Perplexity (131K, 131K, $1/$1, 0.42s, 49.69) | 1 | 1 | false |
| Perplexity: Llama 3.1 Sonar 70B Online | perplexity-ai | Perplexity (127K, 127K, $1/$1, 1.69s, 49.71) | 1 | 1 | false |
| Perplexity: Llama 3.1 Sonar 8B Online | perplexity-ai | Perplexity (127K, 127K, $0.2/$0.2, 1.22s, 187.4) | 0.2 | 0.2 | false |
| OpenAI: GPT-4o-mini | OpenAI | OpenAI (128K, 16K, $0.15/$0.6, 0.44s, 73.35); Azure (128K, 16K, $0.15/$0.6, 1.2s, 139) | 0.15 | 0.6 | false |
| OpenAI: GPT-4o-mini (2024-07-18) | OpenAI | OpenAI (128K, 16K, $0.15/$0.6, 0.39s, 74.07) | 0.15 | 0.6 | false |
| 01.AI: Yi Large | 01.AI | Fireworks (33K, 4K, $3/$3, 0.89s, 84.28) | 3 | 3 | false |
| AI21: Jamba Instruct | AI21 | AI21 (256K, 4K, $0.5/$0.7, 0.46s, 179.5) | 0.5 | 0.7 | false |
| Anthropic: Claude 3.5 Sonnet (2024-06-20) (self-moderated) | Anthropic | Anthropic (200K, 8K, $3/$15, 1.13s, 61.2) | 3 | 15 | false |
| Anthropic: Claude 3.5 Sonnet (2024-06-20) | Anthropic | Anthropic (200K, 8K, $3/$15, 1.13s, 61.2); Google Vertex (200K, 8K, $3/$15, 1.08s, 62.71) | 3 | 15 | false |
| Google: Gemini Flash 1.5 | Google | Google Vertex (1M, 8K, $0.075/$0.3, 0.33s, 152.3); Google AI Studio (1M, 8K, $0.075/$0.3, 0.5s, 155.5) | 0.075 | 0.3 | false |
| OpenAI: GPT-4o (2024-05-13) | OpenAI | OpenAI (128K, 4K, $5/$15, 0.48s, 98.36); Azure (128K, 4K, $5/$15, 2.26s, 110.2) | 5 | 15 | false |
| OpenAI: GPT-4o | OpenAI | OpenAI (128K, 16K, $2.5/$10, 0.49s, 46.51); Azure (128K, 16K, $2.5/$10, 1.38s, 74.86) | 2.5 | 10 | false |
| OpenAI: GPT-4o (extended) | OpenAI | OpenAI (128K, 64K, $6/$18, 0.86s, 114.4) | 6 | 18 | false |
| Google: Gemini Pro 1.5 | Google | Google Vertex (2M, 8K, $1.25/$5, 1.08s, 67.52); Google AI Studio (2M, 8K, $1.25/$5, 1.06s, 97.29) | 1.25 | 5 | false |
| OpenAI: GPT-4 Turbo | OpenAI | OpenAI (128K, 4K, $10/$30, 1.04s, 41.95) | 10 | 30 | false |
| Cohere: Command R+ | Cohere | Cohere (128K, 4K, $2.85/$14.25, 0.51s, 64.18) | 2.85 | 14.25 | false |
| Cohere: Command R+ (04-2024) | Cohere | Cohere (128K, 4K, $2.85/$14.25, 0.78s, 63.29) | 2.85 | 14.25 | false |
| Cohere: Command | Cohere | Cohere (4K, 4K, $0.95/$1.9, 0.48s, 29.61) | 0.95 | 1.9 | false |
| Cohere: Command R | Cohere | Cohere (128K, 4K, $0.475/$1.425, 0.21s, 143.1) | 0.475 | 1.425 | false |
| Anthropic: Claude 3 Haiku (self-moderated) | Anthropic | Anthropic (200K, 4K, $0.25/$1.25, 0.65s, 152.7) | 0.25 | 1.25 | false |
| Anthropic: Claude 3 Haiku | Anthropic | Anthropic (200K, 4K, $0.25/$1.25, 0.65s, 152.7); Google Vertex (200K, 4K, $0.25/$1.25, 1.19s, 152.7) | 0.25 | 1.25 | false |
| Anthropic: Claude 3 Opus (self-moderated) | Anthropic | Anthropic (200K, 4K, $15/$75, 1.68s, 30.15) | 15 | 75 | false |
| Anthropic: Claude 3 Opus | Anthropic | Anthropic (200K, 4K, $15/$75, 1.68s, 30.15); Google Vertex (200K, 4K, $15/$75, 2.39s, 30.22) | 15 | 75 | false |
| Anthropic: Claude 3 Sonnet (self-moderated) | Anthropic | Anthropic (200K, 4K, $3/$15, 0.97s, 51.49) | 3 | 15 | false |
| Anthropic: Claude 3 Sonnet | Anthropic | Anthropic (200K, 4K, $3/$15, 0.97s, 51.49); Google Vertex (200K, 4K, $3/$15, 1.95s, 50.65) | 3 | 15 | false |
| Cohere: Command R (03-2024) | Cohere | Cohere (128K, 4K, $0.475/$1.425, 0.39s, 126.8) | 0.475 | 1.425 | false |
| Mistral Large | Mistral Large | Mistral (128K, 128K, $2/$6, 0.46s, 44.85); Azure (128K, 128K, $3/$9, 0.65s, 39.84) | 2 / 3 / 3 | 6 / 9 / 9 | false |
| OpenAI: GPT-3.5 Turbo (older v0613) | OpenAI | Azure (4K, 4K, $1/$2, 0.12s, 106.1) | 1 | 2 | false |
| OpenAI: GPT-4 Turbo Preview | OpenAI | OpenAI (128K, 4K, $10/$30, 0.74s, 36.29) | 10 | 30 | false |
| Mistral Small | Mistral Small | Mistral (32K, 32K, $0.2/$0.6, 0.39s, 77.93) | 0.2 | 0.6 | false |
| Mistral Tiny | Mistral Tiny | Mistral (32K, 32K, $0.25/$0.25, 0.28s, 129.4) | 0.25 | 0.25 | false |
| Mistral Medium | Mistral Medium | Mistral (32K, 32K, $2.75/$8.1, 0.66s, 32.13) | 2.75 | 8.1 | false |
| Google: Gemini Pro Vision 1.0 | Google | Google Vertex (16K, 2K, $0.5/$1.5, 3.04s, 197.3) | 0.5 | 1.5 | false |
| Google: Gemini Pro 1.0 | Google | Google Vertex (33K, 8K, $0.5/$1.5, 0.78s, 104.7) | 0.5 | 1.5 | false |
| Anthropic: Claude v2 (self-moderated) | Anthropic | Anthropic (200K, 4K, $8/$24, 2.24s, 15.91) | 8 | 24 | false |
| Anthropic: Claude v2 | Anthropic | Anthropic (200K, 4K, $8/$24, 2.24s, 15.91) | 8 | 24 | false |
| Anthropic: Claude v2.1 (self-moderated) | Anthropic | Anthropic (200K, 4K, $8/$24, 1.19s, 16) | 8 | 24 | false |
| Anthropic: Claude v2.1 | Anthropic | Anthropic (200K, 4K, $8/$24, 1.19s, 16) | 8 | 24 | false |
| OpenAI: GPT-3.5 Turbo 16k (older v1106) | OpenAI | OpenAI (16K, 4K, $1/$2, 0.39s, 161.7) | 1 | 2 | false |
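Once the `providers` column is parsed, every remaining column is scalar, so the table drops straight into pandas. A hedged usage sketch: `rows` below is a hand-copied two-row excerpt of the table above standing in for the full dataset, and the 3:1 input:output blend ratio is an arbitrary illustration, not part of the data:

```python
# Rank models by a blended per-million-token cost with pandas.
import pandas as pd

# Two-row excerpt of the table above; a real loader would read the full dataset.
rows = [
    {"name": "OpenAI: GPT-4o-mini", "author": "OpenAI",
     "median_input_cost": 0.15, "median_output_cost": 0.6, "is_open_weights": False},
    {"name": "Anthropic: Claude 3.7 Sonnet", "author": "Anthropic",
     "median_input_cost": 3.0, "median_output_cost": 15.0, "is_open_weights": False},
]
df = pd.DataFrame(rows)

# Blended $/M for a workload with 3x as many input tokens as output tokens.
df["blended_cost"] = (3 * df["median_input_cost"] + df["median_output_cost"]) / 4
print(df.sort_values("blended_cost")[["name", "blended_cost"]])
```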