Steven10429 committed on
Commit 05fc347 · 1 Parent(s): 3e36b23

finally worked

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -209,7 +209,8 @@ def quantize(model_path, repo_id, quant_method=None):
 
 def create_readme(repo_name, base_model_name, lora_model_name, quant_methods):
     readme_path = os.path.join("output", repo_name, "README.md")
-    readme_template = """---tags:
+    readme_template = """---
+tags:
 - autotrain
 - text-generation-inference
 - text-generation
@@ -284,7 +285,7 @@ def process_model(base_model_name, lora_model_name, repo_name, quant_methods, hf
 
 
     # Quantize the model
-    for quant_method in quant_methods:
+    for quant_method in quant_methods[::-1]:
         quantize(output_dir, repo_name, quant_method=quant_method)
 
     create_readme(repo_name, base_model_name, lora_model_name, quant_methods)
@@ -338,7 +339,7 @@ def create_ui():
        )
        quant_method = gr.CheckboxGroup(
            choices=["Q2_K", "Q4_K", "IQ4_NL", "Q5_K_M", "Q6_K", "Q8_0"],
-           value=["Q4_K", "Q8_0"],
+           value=["Q2_K", "Q4_K", "IQ4_NL", "Q5_K_M", "Q6_K", "Q8_0"],
            label="Quantization Method"
        )
        hf_token = gr.Textbox(
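The first hunk is the substantive fix: YAML frontmatter in a Hugging Face model card is only recognized when the opening "---" sits on its own line, so the old template's """---tags: start meant the generated README's tags were never parsed as metadata. Below is a minimal sketch, not part of app.py, using PyYAML and a hypothetical frontmatter() helper to show the before/after difference.

import yaml

# Template start as it was before this commit: "---" and "tags:" share a line,
# so there is no opening frontmatter delimiter on its own line.
broken = """---tags:
- autotrain
---"""

# Template start as of this commit: "---" opens a proper frontmatter block.
fixed = """---
tags:
- autotrain
- text-generation-inference
- text-generation
---"""

def frontmatter(text):
    # Hypothetical helper: read the YAML block between the two "---" lines,
    # roughly the way a model-card renderer would.
    lines = text.splitlines()
    if not lines or lines[0].strip() != "---":
        return None  # no opening delimiter on its own line -> no metadata
    try:
        end = lines.index("---", 1)
    except ValueError:
        return None
    return yaml.safe_load("\n".join(lines[1:end]))

print(frontmatter(broken))  # None: the tags are invisible to the card parser
print(frontmatter(fixed))   # {'tags': ['autotrain', 'text-generation-inference', 'text-generation']}

The other two hunks only change defaults: the quantization loop now walks the selected methods in reverse order, and the Gradio CheckboxGroup pre-selects every available quantization method instead of just Q4_K and Q8_0.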