Update app.py
app.py CHANGED
@@ -66,36 +66,36 @@ def fetch_stats():
 
     model_types = ["adapter", "finetune", "merge", "quantized"]
     base_models = [
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
-        "
+        "Llama-3.3-70B-Instruct",
+        "Meta-Llama-3-70B-Instruct",
+        "Llama-3.1-70B-Instruct",
+        "Llama-3.1-405B-FP8",
+        "Llama-3.2-90B-Vision-Instruct",
+        "Llama-3.2-11B-Vision-Instruct",
+        "Llama-3.2-3B-Instruct-QLORA_INT4_EO8",
+        "Llama-3.2-3B-Instruct-SpinQuant_INT4_EO8",
+        "Llama-3.2-1B-Instruct-SpinQuant_INT4_EO8",
+        "Llama-3.2-1B-Instruct-QLORA_INT4_EO8",
+        "Llama-Guard-3-11B-Vision",
+        "Llama-3.2-1B",
+        "Llama-3.2-1B-Instruct",
+        "Llama-3.2-3B",
+        "Llama-3.2-3B-Instruct",
+        "Llama-3.1-8B",
+        "Llama-Guard-3-8B",
+        "Meta-Llama-3-70B",
+        "Meta-Llama-3-8B-Instruct",
+        "Meta-Llama-3-8B",
+        "Llama-3.2-90B-Vision",
+        "Llama-3.2-11B-Vision",
+        "Llama-Guard-3-1B",
+        "Llama-Guard-3-1B-INT4",
+        "Llama-3.1-405B-Instruct-FP8",
+        "Llama-3.1-405B-Instruct",
+        "Llama-3.1-405B",
+        "Llama-3.1-70B",
+        "Llama-3.1-8B-Instruct",
+        "Llama-Guard-3-8B-INT8"
     ]
 
     derivative_stats = []
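
For context on how lists like model_types and base_models are typically consumed in a stats Space, below is a minimal sketch of a fetch_stats-style loop. It is an illustration only, not the code from this diff: it assumes the Hub's "base_model:<type>:<repo_id>" filter syntax and a "meta-llama/" organization prefix, neither of which is shown above.

# Illustrative sketch only -- not this Space's actual implementation.
# Assumptions: the Hub filter string "base_model:<type>:<repo_id>" and the
# "meta-llama/" org prefix for the checkpoints listed in the diff.
from huggingface_hub import HfApi

api = HfApi()

model_types = ["adapter", "finetune", "merge", "quantized"]
base_models = ["Llama-3.3-70B-Instruct", "Llama-3.1-8B-Instruct"]  # shortened for the example

derivative_stats = []
for base in base_models:
    counts = {"base_model": base}
    for model_type in model_types:
        # Count public repos that declare this checkpoint as their base model
        # for the given derivative type (adapter, finetune, merge, quantized).
        derivatives = api.list_models(filter=f"base_model:{model_type}:meta-llama/{base}")
        counts[model_type] = sum(1 for _ in derivatives)
    derivative_stats.append(counts)

Iterating the full listing is slow for popular checkpoints; a real Space would likely cache results or batch requests, but the shape of the data (one row of per-type counts per base model) is the same.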